Merge commit 'v3.0-rc7' into android-3.0
diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644
index 0000000..72a62af
--- /dev/null
+++ b/Documentation/android.txt
@@ -0,0 +1,121 @@
+				=============
+				A N D R O I D
+				=============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+  1.1 Required enabled config options
+  1.2 Required disabled config options
+  1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already. This list is based on the
+msm_defconfig. You should keep the rest of the default options enabled in the
+defconfig unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+ANDROID_RAM_CONSOLE
+ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
index 9ad85df..3419707 100644
--- a/Documentation/cgroups/cpuacct.txt
+++ b/Documentation/cgroups/cpuacct.txt
@@ -39,6 +39,13 @@
 
 user and system are in USER_HZ unit.
 
+cpuacct.cpufreq file gives CPU time (in nanoseconds) spent at each CPU
+frequency. Platform hooks must be implemented in order to properly track
+time at each CPU frequency.
+
+cpuacct.power file gives CPU power consumed (in milliwatt-seconds). The
+platform must provide and implement power callback functions.
+
 cpuacct controller uses percpu_counter interface to collect user and
 system times. This has two side effects:
 
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index e74d0a2..16799ce 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -28,6 +28,7 @@
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Interactive
 
 3.   The Governor Interface in the CPUfreq Core
 
@@ -193,6 +194,41 @@
 default value of '20' it means that if the CPU usage needs to be below
 20% between samples to have the frequency decreased.
 
+
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors.  However,
+the governor is more aggressive about scaling the CPU speed up in
+response to CPU-intensive activity.
+
+Sampling the CPU load every X ms can lead to under-powering the CPU
+for X ms, leading to dropped frames, stuttering UI, etc.  Instead of
+sampling the cpu at a specified rate, the interactive governor will
+check whether to scale the cpu frequency up soon after coming out of
+idle.  When the cpu comes out of idle, a timer is configured to fire
+within 1-2 ticks.  If the cpu is very busy between exiting idle and
+when the timer fires, we assume the cpu is underpowered and ramp it
+to MAX speed.
+
+If the cpu was not sufficiently busy to immediately ramp to MAX speed,
+the governor evaluates the cpu load since the last speed adjustment,
+choosing the higher of that longer-term load and the short-term load
+since idle exit to determine the cpu speed to ramp to.
+
+The tuneable values for this governor are:
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. This is to ensure that the governor has
+seen enough historic cpu load data to determine the appropriate
+workload.  Default is 80000 usec.
+
+go_maxspeed_load: The CPU load at which to ramp to max speed.  Default
+is 85.
+
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 940b201..e58603b 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -656,6 +656,8 @@
 @		b	__arm6_mmu_cache_off
 @		b	__armv3_mmu_cache_flush
 
+#if !defined(CONFIG_CPU_V7)
+		/* This collides with some V7 IDs, preventing correct detection */
 		.word	0x00000000		@ old ARM ID
 		.word	0x0000f000
 		mov	pc, lr
@@ -664,6 +666,7 @@
  THUMB(		nop				)
 		mov	pc, lr
  THUMB(		nop				)
+#endif
 
 		.word	0x41007000		@ ARM7/710
 		.word	0xfff8fe00
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 4b71766..cf82a88 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -39,3 +39,54 @@
 
 config SHARP_SCOOP
 	bool
+
+config FIQ_GLUE
+	bool
+	select FIQ
+
+config FIQ_DEBUGGER
+	bool "FIQ Mode Serial Debugger"
+	select FIQ
+	select FIQ_GLUE
+	select KERNEL_DEBUGGER_CORE
+	default n
+	help
+	  The FIQ serial debugger can accept commands even when the
+	  kernel is unresponsive due to being stuck with interrupts
+	  disabled.  Depends on the kernel debugger core in drivers/misc.
+
+
+config FIQ_DEBUGGER_NO_SLEEP
+	bool "Keep serial debugger active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables the serial debugger at boot. Passing
+	  fiq_debugger.no_sleep on the kernel command line will
+	  override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+	bool "Don't disable wakeup IRQ when debugger is active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Don't disable the wakeup irq when enabling the uart clock.  This will
+	  cause extra interrupts, but it makes the serial debugger usable on
+	  some MSM radio builds that ignore the uart clock request in power
+	  collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+	bool "Console on FIQ Serial Debugger port"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables a console so that printk messages are displayed on
+	  the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+	bool "Put the FIQ debugger into console mode by default"
+	depends on FIQ_DEBUGGER_CONSOLE
+	default n
+	help
+	  If enabled, this puts the fiq debugger into console mode by default.
+	  Otherwise, the fiq debugger will start out in debug mode.
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 6ea9b6f..3ab5d76 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -17,3 +17,5 @@
 obj-$(CONFIG_ARCH_IXP23XX)	+= uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
+obj-$(CONFIG_FIQ_GLUE)		+= fiq_glue.o fiq_glue_setup.o
+obj-$(CONFIG_FIQ_DEBUGGER)	+= fiq_debugger.o
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c
new file mode 100644
index 0000000..080f69e
--- /dev/null
+++ b/arch/arm/common/fiq_debugger.c
@@ -0,0 +1,942 @@
+/*
+ * arch/arm/common/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_debugger.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#include <asm/fiq_debugger.h>
+#include <asm/fiq_glue.h>
+#include <asm/stacktrace.h>
+
+#include <mach/system.h>
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+		((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_state {
+	struct fiq_glue_handler handler;
+
+	int fiq;
+	int signal_irq;
+	int wakeup_irq;
+	bool wakeup_irq_no_set_wake;
+	struct clk *clk;
+	struct fiq_debugger_pdata *pdata;
+	struct platform_device *pdev;
+
+	char debug_cmd[DEBUG_MAX];
+	int debug_busy;
+	int debug_abort;
+
+	char debug_buf[DEBUG_MAX];
+	int debug_count;
+
+	bool no_sleep;
+	bool debug_enable;
+	bool ignore_next_wakeup_irq;
+	struct timer_list sleep_timer;
+	bool uart_clk_enabled;
+	struct wake_lock debugger_wake_lock;
+	bool console_enable;
+	int current_cpu;
+	atomic_t unhandled_fiq_count;
+	bool in_fiq;
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+	struct console console;
+	struct tty_driver *tty_driver;
+	struct tty_struct *tty;
+	int tty_open_count;
+	struct fiq_debugger_ringbuf *tty_rbuf;
+#endif
+
+	unsigned int last_irqs[NR_IRQS];
+	unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+	if (state->wakeup_irq < 0)
+		return;
+	enable_irq(state->wakeup_irq);
+	if (!state->wakeup_irq_no_set_wake)
+		enable_irq_wake(state->wakeup_irq);
+}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+	if (state->wakeup_irq < 0)
+		return;
+	disable_irq_nosync(state->wakeup_irq);
+	if (!state->wakeup_irq_no_set_wake)
+		disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static void debug_force_irq(struct fiq_debugger_state *state)
+{
+	unsigned int irq = state->signal_irq;
+	if (state->pdata->force_irq)
+		state->pdata->force_irq(state->pdev, irq);
+	else {
+		struct irq_chip *chip = irq_get_chip(irq);
+		if (chip && chip->irq_retrigger)
+			chip->irq_retrigger(irq_get_irq_data(irq));
+	}
+}
+
+static void debug_uart_flush(struct fiq_debugger_state *state)
+{
+	if (state->pdata->uart_flush)
+		state->pdata->uart_flush(state->pdev);
+}
+
+static void debug_puts(struct fiq_debugger_state *state, char *s)
+{
+	unsigned c;
+	while ((c = *s++)) {
+		if (c == '\n')
+			state->pdata->uart_putc(state->pdev, '\r');
+		state->pdata->uart_putc(state->pdev, c);
+	}
+}
+
+static void debug_prompt(struct fiq_debugger_state *state)
+{
+	debug_puts(state, "debug> ");
+}
+
+int log_buf_copy(char *dest, int idx, int len);
+static void dump_kernel_log(struct fiq_debugger_state *state)
+{
+	char buf[1024];
+	int idx = 0;
+	int ret;
+	int saved_oip;
+
+	/* setting oops_in_progress prevents log_buf_copy()
+	 * from trying to take a spinlock which will make it
+	 * very unhappy in some cases...
+	 */
+	saved_oip = oops_in_progress;
+	oops_in_progress = 1;
+	for (;;) {
+		ret = log_buf_copy(buf, idx, 1023);
+		if (ret <= 0)
+			break;
+		buf[ret] = 0;
+		debug_puts(state, buf);
+		idx += ret;
+	}
+	oops_in_progress = saved_oip;
+}
+
+static char *mode_name(unsigned cpsr)
+{
+	switch (cpsr & MODE_MASK) {
+	case USR_MODE: return "USR";
+	case FIQ_MODE: return "FIQ";
+	case IRQ_MODE: return "IRQ";
+	case SVC_MODE: return "SVC";
+	case ABT_MODE: return "ABT";
+	case UND_MODE: return "UND";
+	case SYSTEM_MODE: return "SYS";
+	default: return "???";
+	}
+}
+
+static int debug_printf(void *cookie, const char *fmt, ...)
+{
+	struct fiq_debugger_state *state = cookie;
+	char buf[256];
+	va_list ap;
+
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	debug_puts(state, buf);
+	return state->debug_abort;
+}
+
+/* Safe outside fiq context */
+static int debug_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+	struct fiq_debugger_state *state = cookie;
+	char buf[256];
+	va_list ap;
+	unsigned long irq_flags;
+
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	local_irq_save(irq_flags);
+	debug_puts(state, buf);
+	debug_uart_flush(state);
+	local_irq_restore(irq_flags);
+	return state->debug_abort;
+}
+
+static void dump_regs(struct fiq_debugger_state *state, unsigned *regs)
+{
+	debug_printf(state, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+			regs[0], regs[1], regs[2], regs[3]);
+	debug_printf(state, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+			regs[4], regs[5], regs[6], regs[7]);
+	debug_printf(state, " r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+			regs[8], regs[9], regs[10], regs[11],
+			mode_name(regs[16]));
+	if ((regs[16] & MODE_MASK) == USR_MODE)
+		debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
+				"cpsr %08x\n", regs[12], regs[13], regs[14],
+				regs[15], regs[16]);
+	else
+		debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
+				"cpsr %08x  spsr %08x\n", regs[12], regs[13],
+				regs[14], regs[15], regs[16], regs[17]);
+}
+
+struct mode_regs {
+	unsigned long sp_svc;
+	unsigned long lr_svc;
+	unsigned long spsr_svc;
+
+	unsigned long sp_abt;
+	unsigned long lr_abt;
+	unsigned long spsr_abt;
+
+	unsigned long sp_und;
+	unsigned long lr_und;
+	unsigned long spsr_und;
+
+	unsigned long sp_irq;
+	unsigned long lr_irq;
+	unsigned long spsr_irq;
+
+	unsigned long r8_fiq;
+	unsigned long r9_fiq;
+	unsigned long r10_fiq;
+	unsigned long r11_fiq;
+	unsigned long r12_fiq;
+	unsigned long sp_fiq;
+	unsigned long lr_fiq;
+	unsigned long spsr_fiq;
+};
+
+void __naked get_mode_regs(struct mode_regs *regs)
+{
+	asm volatile (
+	"mrs	r1, cpsr\n"
+	"msr	cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r8 - r14}\n"
+	"mrs	r2, spsr\n"
+	"stmia	r0!, {r2}\n"
+	"msr	cpsr_c, r1\n"
+	"bx	lr\n");
+}
+
+
+static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs)
+{
+	struct mode_regs mode_regs;
+	dump_regs(state, regs);
+	get_mode_regs(&mode_regs);
+	debug_printf(state, " svc: sp %08x  lr %08x  spsr %08x\n",
+			mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+	debug_printf(state, " abt: sp %08x  lr %08x  spsr %08x\n",
+			mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+	debug_printf(state, " und: sp %08x  lr %08x  spsr %08x\n",
+			mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+	debug_printf(state, " irq: sp %08x  lr %08x  spsr %08x\n",
+			mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+	debug_printf(state, " fiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  "
+			"r12 %08x\n",
+			mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+			mode_regs.r11_fiq, mode_regs.r12_fiq);
+	debug_printf(state, " fiq: sp %08x  lr %08x  spsr %08x\n",
+			mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+static void dump_irqs(struct fiq_debugger_state *state)
+{
+	int n;
+	unsigned int cpu;
+
+	debug_printf(state, "irqnr       total  since-last   status  name\n");
+	for (n = 0; n < NR_IRQS; n++) {
+		struct irqaction *act = irq_desc[n].action;
+		if (!act && !kstat_irqs(n))
+			continue;
+		debug_printf(state, "%5d: %10u %11u %8x  %s\n", n,
+			kstat_irqs(n),
+			kstat_irqs(n) - state->last_irqs[n],
+			irq_desc[n].status_use_accessors,
+			(act && act->name) ? act->name : "???");
+		state->last_irqs[n] = kstat_irqs(n);
+	}
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+
+		debug_printf(state, "LOC %d: %10u %11u\n", cpu,
+			     __IRQ_STAT(cpu, local_timer_irqs),
+			     __IRQ_STAT(cpu, local_timer_irqs) -
+			     state->last_local_timer_irqs[cpu]);
+		state->last_local_timer_irqs[cpu] =
+			__IRQ_STAT(cpu, local_timer_irqs);
+	}
+}
+
+struct stacktrace_state {
+	struct fiq_debugger_state *state;
+	unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+	struct stacktrace_state *sts = d;
+
+	if (sts->depth) {
+		debug_printf(sts->state,
+			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+			frame->pc, frame->pc, frame->lr, frame->lr,
+			frame->sp, frame->fp);
+		sts->depth--;
+		return 0;
+	}
+	debug_printf(sts->state, "  ...\n");
+
+	return sts->depth == 0;
+}
+
+struct frame_tail {
+	struct frame_tail *fp;
+	unsigned long sp;
+	unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_state *state,
+					struct frame_tail *tail)
+{
+	struct frame_tail buftail[2];
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+		debug_printf(state, "  invalid frame pointer %p\n", tail);
+		return NULL;
+	}
+	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+		debug_printf(state,
+			"  failed to copy frame pointer %p\n", tail);
+		return NULL;
+	}
+
+	debug_printf(state, "  %p\n", buftail[0].lr);
+
+	/* frame pointers should strictly progress back up the stack
+	 * (towards higher addresses) */
+	if (tail >= buftail[0].fp)
+		return NULL;
+
+	return buftail[0].fp-1;
+}
+
+void dump_stacktrace(struct fiq_debugger_state *state,
+		struct pt_regs * const regs, unsigned int depth, void *ssp)
+{
+	struct frame_tail *tail;
+	struct thread_info *real_thread_info = THREAD_INFO(ssp);
+	struct stacktrace_state sts;
+
+	sts.depth = depth;
+	sts.state = state;
+	*current_thread_info() = *real_thread_info;
+
+	if (!current)
+		debug_printf(state, "current NULL\n");
+	else
+		debug_printf(state, "pid: %d  comm: %s\n",
+			current->pid, current->comm);
+	dump_regs(state, (unsigned *)regs);
+
+	if (!user_mode(regs)) {
+		struct stackframe frame;
+		frame.fp = regs->ARM_fp;
+		frame.sp = regs->ARM_sp;
+		frame.lr = regs->ARM_lr;
+		frame.pc = regs->ARM_pc;
+		debug_printf(state,
+			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+			regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+			regs->ARM_sp, regs->ARM_fp);
+		walk_stackframe(&frame, report_trace, &sts);
+		return;
+	}
+
+	tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+	while (depth-- && tail && !((unsigned long) tail & 3))
+		tail = user_backtrace(state, tail);
+}
+
+static void debug_help(struct fiq_debugger_state *state)
+{
+	debug_printf(state,	"FIQ Debugger commands:\n"
+				" pc            PC status\n"
+				" regs          Register dump\n"
+				" allregs       Extended Register dump\n"
+				" bt            Stack trace\n"
+				" reboot        Reboot\n"
+				" irqs          Interrupt status\n"
+				" kmsg          Kernel log\n"
+				" version       Kernel version\n");
+	debug_printf(state,	" sleep         Allow sleep while in FIQ\n"
+				" nosleep       Disable sleep while in FIQ\n"
+				" console       Switch terminal to console\n"
+				" cpu           Current CPU\n"
+				" cpu <number>  Switch to CPU<number>\n");
+	if (!state->debug_busy) {
+		strcpy(state->debug_cmd, "help");
+		state->debug_busy = 1;
+		debug_force_irq(state);
+	}
+}
+
+static void debug_exec(struct fiq_debugger_state *state,
+			const char *cmd, unsigned *regs, void *svc_sp)
+{
+	if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+		debug_help(state);
+	} else if (!strcmp(cmd, "pc")) {
+		debug_printf(state, " pc %08x cpsr %08x mode %s\n",
+			regs[15], regs[16], mode_name(regs[16]));
+	} else if (!strcmp(cmd, "regs")) {
+		dump_regs(state, regs);
+	} else if (!strcmp(cmd, "allregs")) {
+		dump_allregs(state, regs);
+	} else if (!strcmp(cmd, "bt")) {
+		dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
+	} else if (!strcmp(cmd, "reboot")) {
+		arch_reset(0, 0);
+	} else if (!strcmp(cmd, "irqs")) {
+		dump_irqs(state);
+	} else if (!strcmp(cmd, "kmsg")) {
+		dump_kernel_log(state);
+	} else if (!strcmp(cmd, "version")) {
+		debug_printf(state, "%s\n", linux_banner);
+	} else if (!strcmp(cmd, "sleep")) {
+		state->no_sleep = false;
+	} else if (!strcmp(cmd, "nosleep")) {
+		state->no_sleep = true;
+	} else if (!strcmp(cmd, "console")) {
+		state->console_enable = true;
+		debug_printf(state, "console mode\n");
+	} else if (!strcmp(cmd, "cpu")) {
+		debug_printf(state, "cpu %d\n", state->current_cpu);
+	} else if (!strncmp(cmd, "cpu ", 4)) {
+		unsigned long cpu = 0;
+		if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+			state->current_cpu = cpu;
+		else
+			debug_printf(state, "invalid cpu\n");
+		debug_printf(state, "cpu %d\n", state->current_cpu);
+	} else {
+		if (state->debug_busy) {
+			debug_printf(state,
+				"command processor busy. trying to abort.\n");
+			state->debug_abort = -1;
+		} else {
+			strcpy(state->debug_cmd, cmd);
+			state->debug_busy = 1;
+		}
+
+		debug_force_irq(state);
+
+		return;
+	}
+	if (!state->console_enable)
+		debug_prompt(state);
+}
+
+static void sleep_timer_expired(unsigned long data)
+{
+	struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+
+	if (state->uart_clk_enabled && !state->no_sleep) {
+		if (state->debug_enable) {
+			state->debug_enable = false;
+			debug_printf_nfiq(state, "suspending fiq debugger\n");
+		}
+		state->ignore_next_wakeup_irq = true;
+		if (state->clk)
+			clk_disable(state->clk);
+		state->uart_clk_enabled = false;
+		enable_wakeup_irq(state);
+	}
+	wake_unlock(&state->debugger_wake_lock);
+}
+
+static irqreturn_t wakeup_irq_handler(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+
+	if (!state->no_sleep)
+		debug_puts(state, "WAKEUP\n");
+	if (state->ignore_next_wakeup_irq)
+		state->ignore_next_wakeup_irq = false;
+	else if (!state->uart_clk_enabled) {
+		wake_lock(&state->debugger_wake_lock);
+		if (state->clk)
+			clk_enable(state->clk);
+		state->uart_clk_enabled = true;
+		disable_wakeup_irq(state);
+		mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t debug_irq(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+	if (state->pdata->force_irq_ack)
+		state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+	if (!state->no_sleep) {
+		wake_lock(&state->debugger_wake_lock);
+		mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+	}
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	if (state->tty) {
+		int i;
+		int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+		for (i = 0; i < count; i++) {
+			int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+			tty_insert_flip_char(state->tty, c, TTY_NORMAL);
+			if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+				pr_warn("fiq tty failed to consume byte\n");
+		}
+		tty_flip_buffer_push(state->tty);
+	}
+#endif
+	if (state->debug_busy) {
+		struct kdbg_ctxt ctxt;
+
+		ctxt.printf = debug_printf_nfiq;
+		ctxt.cookie = state;
+		kernel_debugger(&ctxt, state->debug_cmd);
+		debug_prompt(state);
+
+		state->debug_busy = 0;
+	}
+	return IRQ_HANDLED;
+}
+
+static int debug_getc(struct fiq_debugger_state *state)
+{
+	return state->pdata->uart_getc(state->pdev);
+}
+
+static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
+{
+	struct fiq_debugger_state *state =
+		container_of(h, struct fiq_debugger_state, handler);
+	int c;
+	static int last_c;
+	int count = 0;
+	unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+
+	if (this_cpu != state->current_cpu) {
+		if (state->in_fiq)
+			return;
+
+		if (atomic_inc_return(&state->unhandled_fiq_count) !=
+					MAX_UNHANDLED_FIQ_COUNT)
+			return;
+
+		debug_printf(state, "fiq_debugger: cpu %d not responding, "
+			"reverting to cpu %d\n", state->current_cpu,
+			this_cpu);
+
+		atomic_set(&state->unhandled_fiq_count, 0);
+		state->current_cpu = this_cpu;
+		return;
+	}
+
+	state->in_fiq = true;
+
+	while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+		count++;
+		if (!state->debug_enable) {
+			if ((c == 13) || (c == 10)) {
+				state->debug_enable = true;
+				state->debug_count = 0;
+				debug_prompt(state);
+			}
+		} else if (c == FIQ_DEBUGGER_BREAK) {
+			state->console_enable = false;
+			debug_puts(state, "fiq debugger mode\n");
+			state->debug_count = 0;
+			debug_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+		} else if (state->console_enable && state->tty_rbuf) {
+			fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+			debug_force_irq(state);
+#endif
+		} else if ((c >= ' ') && (c < 127)) {
+			if (state->debug_count < (DEBUG_MAX - 1)) {
+				state->debug_buf[state->debug_count++] = c;
+				state->pdata->uart_putc(state->pdev, c);
+			}
+		} else if ((c == 8) || (c == 127)) {
+			if (state->debug_count > 0) {
+				state->debug_count--;
+				state->pdata->uart_putc(state->pdev, 8);
+				state->pdata->uart_putc(state->pdev, ' ');
+				state->pdata->uart_putc(state->pdev, 8);
+			}
+		} else if ((c == 13) || (c == 10)) {
+			if (c == '\r' || (c == '\n' && last_c != '\r')) {
+				state->pdata->uart_putc(state->pdev, '\r');
+				state->pdata->uart_putc(state->pdev, '\n');
+			}
+			if (state->debug_count) {
+				state->debug_buf[state->debug_count] = 0;
+				state->debug_count = 0;
+				debug_exec(state, state->debug_buf,
+					regs, svc_sp);
+			} else {
+				debug_prompt(state);
+			}
+		}
+		last_c = c;
+	}
+	debug_uart_flush(state);
+	if (state->pdata->fiq_ack)
+		state->pdata->fiq_ack(state->pdev, state->fiq);
+
+	/* poke sleep timer if necessary */
+	if (state->debug_enable && !state->no_sleep)
+		debug_force_irq(state);
+
+	atomic_set(&state->unhandled_fiq_count, 0);
+	state->in_fiq = false;
+}
+
+static void debug_resume(struct fiq_glue_handler *h)
+{
+	struct fiq_debugger_state *state =
+		container_of(h, struct fiq_debugger_state, handler);
+	if (state->pdata->uart_resume)
+		state->pdata->uart_resume(state->pdev);
+}
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *debug_console_device(struct console *co, int *index)
+{
+	struct fiq_debugger_state *state;
+	state = container_of(co, struct fiq_debugger_state, console);
+	*index = 0;
+	return state->tty_driver;
+}
+
+static void debug_console_write(struct console *co,
+				const char *s, unsigned int count)
+{
+	struct fiq_debugger_state *state;
+
+	state = container_of(co, struct fiq_debugger_state, console);
+
+	if (!state->console_enable)
+		return;
+
+	while (count--) {
+		if (*s == '\n')
+			state->pdata->uart_putc(state->pdev, '\r');
+		state->pdata->uart_putc(state->pdev, *s++);
+	}
+	debug_uart_flush(state);
+}
+
+static struct console fiq_debugger_console = {
+	.name = "ttyFIQ",
+	.device = debug_console_device,
+	.write = debug_console_write,
+	.flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	struct fiq_debugger_state *state = tty->driver->driver_state;
+	if (state->tty_open_count++)
+		return 0;
+
+	tty->driver_data = state;
+	state->tty = tty;
+	return 0;
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	struct fiq_debugger_state *state = tty->driver_data;
+	if (--state->tty_open_count)
+		return;
+	state->tty = NULL;
+}
+
+int  fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	int i;
+	struct fiq_debugger_state *state = tty->driver_data;
+
+	if (!state->console_enable)
+		return count;
+
+	if (state->clk)
+		clk_enable(state->clk);
+	for (i = 0; i < count; i++)
+		state->pdata->uart_putc(state->pdev, *buf++);
+	if (state->clk)
+		clk_disable(state->clk);
+
+	return count;
+}
+
+int  fiq_tty_write_room(struct tty_struct *tty)
+{
+	return 1024;
+}
+
+static const struct tty_operations fiq_tty_driver_ops = {
+	.write = fiq_tty_write,
+	.write_room = fiq_tty_write_room,
+	.open = fiq_tty_open,
+	.close = fiq_tty_close,
+};
+
+static int fiq_debugger_tty_init(struct fiq_debugger_state *state)
+{
+	int ret = -EINVAL;
+
+	state->tty_driver = alloc_tty_driver(1);
+	if (!state->tty_driver) {
+		pr_err("Failed to allocate fiq debugger tty\n");
+		return -ENOMEM;
+	}
+
+	state->tty_driver->owner		= THIS_MODULE;
+	state->tty_driver->driver_name	= "fiq-debugger";
+	state->tty_driver->name		= "ttyFIQ";
+	state->tty_driver->type		= TTY_DRIVER_TYPE_SERIAL;
+	state->tty_driver->subtype	= SERIAL_TYPE_NORMAL;
+	state->tty_driver->init_termios	= tty_std_termios;
+	state->tty_driver->init_termios.c_cflag =
+					B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+	state->tty_driver->init_termios.c_ispeed =
+		state->tty_driver->init_termios.c_ospeed = 115200;
+	state->tty_driver->flags		= TTY_DRIVER_REAL_RAW;
+	tty_set_operations(state->tty_driver, &fiq_tty_driver_ops);
+	state->tty_driver->driver_state = state;
+
+	ret = tty_register_driver(state->tty_driver);
+	if (ret) {
+		pr_err("Failed to register fiq tty: %d\n", ret);
+		goto err;
+	}
+
+	state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+	if (!state->tty_rbuf) {
+		pr_err("Failed to allocate fiq debugger ringbuf\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	pr_info("Registered FIQ tty driver %p\n", state->tty_driver);
+	return 0;
+
+err:
+	fiq_debugger_ringbuf_free(state->tty_rbuf);
+	state->tty_rbuf = NULL;
+	put_tty_driver(state->tty_driver);
+	return ret;
+}
+#endif
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct fiq_debugger_state *state;
+
+	if (!pdata->uart_getc || !pdata->uart_putc || !pdata->fiq_enable)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+	state->handler.fiq = debug_fiq;
+	state->handler.resume = debug_resume;
+	setup_timer(&state->sleep_timer, sleep_timer_expired,
+		    (unsigned long)state);
+	state->pdata = pdata;
+	state->pdev = pdev;
+	state->no_sleep = initial_no_sleep;
+	state->debug_enable = initial_debug_enable;
+	state->console_enable = initial_console_enable;
+
+	state->fiq = platform_get_irq_byname(pdev, "fiq");
+	state->signal_irq = platform_get_irq_byname(pdev, "signal");
+	state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+	if (state->wakeup_irq < 0)
+		state->no_sleep = true;
+	state->ignore_next_wakeup_irq = !state->no_sleep;
+
+	wake_lock_init(&state->debugger_wake_lock,
+			WAKE_LOCK_SUSPEND, "serial-debug");
+
+	state->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(state->clk))
+		state->clk = NULL;
+
+	if (state->clk)
+		clk_enable(state->clk);
+
+	if (pdata->uart_init) {
+		ret = pdata->uart_init(pdev);
+		if (ret)
+			goto err_uart_init;
+	}
+
+	debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n",
+				state->no_sleep ? "" : "twice ");
+
+	ret = fiq_glue_register_handler(&state->handler);
+	if (ret) {
+		pr_err("serial_debugger: could not install fiq handler\n");
+		goto err_register_fiq;
+	}
+
+	pdata->fiq_enable(pdev, state->fiq, 1);
+
+	if (state->clk)
+		clk_disable(state->clk);
+
+	ret = request_irq(state->signal_irq, debug_irq,
+			  IRQF_TRIGGER_RISING, "debug", state);
+	if (ret)
+		pr_err("serial_debugger: could not install signal_irq\n");
+
+	if (state->wakeup_irq >= 0) {
+		ret = request_irq(state->wakeup_irq, wakeup_irq_handler,
+				  IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+				  "debug-wakeup", state);
+		if (ret) {
+			pr_err("serial_debugger: "
+				"could not install wakeup irq\n");
+			state->wakeup_irq = -1;
+		} else {
+			ret = enable_irq_wake(state->wakeup_irq);
+			if (ret) {
+				pr_err("serial_debugger: "
+					"could not enable wakeup\n");
+				state->wakeup_irq_no_set_wake = true;
+			}
+		}
+	}
+	if (state->no_sleep)
+		wakeup_irq_handler(state->wakeup_irq, state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	state->console = fiq_debugger_console;
+	register_console(&state->console);
+	fiq_debugger_tty_init(state);
+#endif
+	return 0;
+
+err_register_fiq:
+	if (pdata->uart_free)
+		pdata->uart_free(pdev);
+err_uart_init:
+	if (state->clk) {
+		clk_disable(state->clk);
+		clk_put(state->clk);
+	}
+	kfree(state);
+	return ret;
+}
+
+static struct platform_driver fiq_debugger_driver = {
+	.probe = fiq_debugger_probe,
+	.driver.name = "fiq_debugger",
+};
+
+static int __init fiq_debugger_init(void)
+{
+	return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
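
For reference, a minimal sketch of the platform glue this driver expects: a
platform_device named "fiq_debugger" carrying a struct fiq_debugger_pdata
(declared in asm/fiq_debugger.h later in this patch) plus IRQ resources named
"fiq", "signal" and "wakeup", which probe() looks up with
platform_get_irq_byname(). The UART accessors and IRQ numbers below are
hypothetical placeholders, not part of this patch.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/fiq_debugger.h>

/* Hypothetical SoC hooks -- a real board supplies working implementations. */
static int my_uart_getc(struct platform_device *pdev)
{
	return FIQ_DEBUGGER_NO_CHAR;	/* next rx byte, or NO_CHAR if empty */
}

static void my_uart_putc(struct platform_device *pdev, unsigned int c)
{
	/* write c to the debug uart */
}

static void my_fiq_enable(struct platform_device *pdev, unsigned int fiq,
			  bool enable)
{
	/* route (or unroute) the debug uart interrupt as an FIQ */
}

static struct fiq_debugger_pdata my_fiq_debugger_pdata = {
	.uart_getc	= my_uart_getc,
	.uart_putc	= my_uart_putc,
	.fiq_enable	= my_fiq_enable,
};

static struct resource my_fiq_debugger_resources[] = {
	{ .name = "fiq",    .start = 100, .end = 100, .flags = IORESOURCE_IRQ },
	{ .name = "signal", .start = 101, .end = 101, .flags = IORESOURCE_IRQ },
	{ .name = "wakeup", .start = 102, .end = 102, .flags = IORESOURCE_IRQ },
};

static struct platform_device my_fiq_debugger_device = {
	.name		= "fiq_debugger",
	.id		= -1,
	.dev		= { .platform_data = &my_fiq_debugger_pdata },
	.resource	= my_fiq_debugger_resources,
	.num_resources	= ARRAY_SIZE(my_fiq_debugger_resources),
};

static int __init my_board_fiq_debugger_init(void)
{
	return platform_device_register(&my_fiq_debugger_device);
}
device_initcall(my_board_fiq_debugger_init);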
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h
new file mode 100644
index 0000000..2649b55
--- /dev/null
+++ b/arch/arm/common/fiq_debugger_ringbuf.h
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/common/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+	int len;
+	int head;
+	int tail;
+	u8 buf[];
+};
+
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+	struct fiq_debugger_ringbuf *rbuf;
+
+	rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+	if (rbuf == NULL)
+		return NULL;
+
+	rbuf->len = len;
+	rbuf->head = 0;
+	rbuf->tail = 0;
+	smp_mb();
+
+	return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+	kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+	int level = rbuf->head - rbuf->tail;
+
+	if (level < 0)
+		level = rbuf->len + level;
+
+	return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+	return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+	return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+	count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+	rbuf->tail = (rbuf->tail + count) % rbuf->len;
+	smp_mb();
+
+	return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+	if (fiq_debugger_ringbuf_room(rbuf) == 0)
+		return 0;
+
+	rbuf->buf[rbuf->head] = datum;
+	smp_mb();
+	rbuf->head = (rbuf->head + 1) % rbuf->len;
+	smp_mb();
+
+	return 1;
+}
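
As a usage note, here is a minimal (hypothetical) sketch of the intended
producer/consumer pattern for this ring buffer, mirroring how fiq_debugger.c
above uses it: the FIQ handler pushes received bytes, and the IRQ handler
repeatedly peeks the oldest byte and then consumes it.

#include <linux/kernel.h>
#include <linux/errno.h>
#include "fiq_debugger_ringbuf.h"

static int ringbuf_demo(void)
{
	struct fiq_debugger_ringbuf *rbuf;

	rbuf = fiq_debugger_ringbuf_alloc(1024);
	if (!rbuf)
		return -ENOMEM;

	/* producer side (FIQ context): push() returns 0 when the buffer is full */
	if (!fiq_debugger_ringbuf_push(rbuf, 'x'))
		pr_warn("ringbuf full, byte dropped\n");

	/* consumer side (IRQ context): always peek offset 0, then consume it */
	while (fiq_debugger_ringbuf_level(rbuf) > 0) {
		u8 c = fiq_debugger_ringbuf_peek(rbuf, 0);

		pr_info("received byte %#x\n", c);
		fiq_debugger_ringbuf_consume(rbuf, 1);
	}

	fiq_debugger_ringbuf_free(rbuf);
	return 0;
}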
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 0000000..9e3455a
--- /dev/null
+++ b/arch/arm/common/fiq_glue.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+		.text
+
+		.global fiq_glue_end
+
+		/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+		/* store pc, cpsr from previous mode */
+		mrs	r12, spsr
+		sub	r11, lr, #4
+		subs	r10, #1
+		bne	nested_fiq
+
+		stmfd	sp!, {r11-r12, lr}
+
+		/* store r8-r14 from previous mode */
+		sub	sp, sp, #(7 * 4)
+		stmia	sp, {r8-r14}^
+		nop
+
+		/* store r0-r7 from previous mode */
+		stmfd	sp!, {r0-r7}
+
+		/* setup func(data,regs) arguments */
+		mov	r0, r9
+		mov	r1, sp
+		mov	r3, r8
+
+		mov	r7, sp
+
+		/* Get sp and lr from non-user modes */
+		and	r4, r12, #MODE_MASK
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode
+
+		mov	r7, sp
+		orr	r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+		msr	cpsr_c, r4
+		str	sp, [r7, #(4 * 13)]
+		str	lr, [r7, #(4 * 14)]
+		mrs	r5, spsr
+		str	r5, [r7, #(4 * 17)]
+
+		cmp	r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		/* use fiq stack if we reenter this mode */
+		subne	sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+		msr	cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		mov	r2, sp
+		sub	sp, r7, #12
+		stmfd	sp!, {r2, ip, lr}
+		/* call func(data,regs) */
+		blx	r3
+		ldmfd	sp, {r2, ip, lr}
+		mov	sp, r2
+
+		/* restore/discard saved state */
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode_exit
+
+		msr	cpsr_c, r4
+		ldr	sp, [r7, #(4 * 13)]
+		ldr	lr, [r7, #(4 * 14)]
+		msr	spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+		msr	cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+		ldmfd	sp!, {r0-r7}
+		add	sp, sp, #(7 * 4)
+		ldmfd	sp!, {r11-r12, lr}
+exit_fiq:
+		msr	spsr_cxsf, r12
+		add	r10, #1
+		movs	pc, r11
+
+nested_fiq:
+		orr	r12, r12, #(PSR_F_BIT)
+		b	exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp */
+		mrs		r3, cpsr
+		msr		cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+		movs		r8, r0
+		mov		r9, r1
+		mov		sp, r2
+		moveq		r10, #0
+		movne		r10, #1
+		msr		cpsr_c, r3
+		bx		lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644
index 0000000..4044c7d
--- /dev/null
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+	.name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+	struct fiq_glue_handler *handler = info;
+	fiq_glue_setup(handler->fiq, handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+	int ret;
+	int cpu;
+
+	if (!handler || !handler->fiq)
+		return -EINVAL;
+
+	mutex_lock(&fiq_glue_lock);
+	if (fiq_stack) {
+		ret = -EBUSY;
+		goto err_busy;
+	}
+
+	for_each_possible_cpu(cpu) {
+		void *stack;
+		stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+		if (WARN_ON(!stack)) {
+			ret = -ENOMEM;
+			goto err_alloc_fiq_stack;
+		}
+		per_cpu(fiq_stack, cpu) = stack;
+	}
+
+	ret = claim_fiq(&fiq_debugger_fiq_handler);
+	if (WARN_ON(ret))
+		goto err_claim_fiq;
+
+	current_handler = handler;
+	on_each_cpu(fiq_glue_setup_helper, handler, true);
+	set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+	mutex_unlock(&fiq_glue_lock);
+	return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+	for_each_possible_cpu(cpu) {
+		free_pages((unsigned long)per_cpu(fiq_stack, cpu),
+			   THREAD_SIZE_ORDER);
+		per_cpu(fiq_stack, cpu) = NULL;
+	}
+err_busy:
+	mutex_unlock(&fiq_glue_lock);
+	return ret;
+}
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler it will be called.
+ */
+
+void fiq_glue_resume(void)
+{
+	if (!current_handler)
+		return;
+	fiq_glue_setup(current_handler->fiq, current_handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP);
+	if (current_handler->resume)
+		current_handler->resume(current_handler);
+}
+
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c..1252a26 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -249,7 +249,7 @@
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
 	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h
new file mode 100644
index 0000000..e711b57
--- /dev/null
+++ b/arch/arm/include/asm/fiq_debugger.h
@@ -0,0 +1,46 @@
+/*
+ * arch/arm/include/asm/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_FIQ_DEBUGGER_H
+#define __ASM_FIQ_DEBUGGER_H
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME	"fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME	"signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME	"wakeup"
+
+struct fiq_debugger_pdata {
+	int (*uart_init)(struct platform_device *pdev);
+	void (*uart_free)(struct platform_device *pdev);
+	int (*uart_resume)(struct platform_device *pdev);
+	int (*uart_getc)(struct platform_device *pdev);
+	void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+	void (*uart_flush)(struct platform_device *pdev);
+
+	void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+								bool enable);
+	void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+	void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+	void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644
index 0000000..d54c29d
--- /dev/null
+++ b/arch/arm/include/asm/fiq_glue.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+	void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+	void (*resume)(struct fiq_glue_handler *h);
+};
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
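
A minimal sketch of a client of this interface (fiq_debugger.c above is the
real in-tree user), assuming a hypothetical driver: fill in a fiq_glue_handler,
register it once, and have the platform's low-power exit path call
fiq_glue_resume() before re-enabling FIQs.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/fiq_glue.h>

static void my_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	/* runs in FIQ mode, on the per-cpu stack set up by fiq_glue_setup() */
}

static void my_fiq_resume(struct fiq_glue_handler *h)
{
	/* reprogram any device state lost across the low-power state */
}

static struct fiq_glue_handler my_fiq_handler = {
	.fiq	= my_fiq,
	.resume	= my_fiq_resume,
};

static int __init my_fiq_client_init(void)
{
	/* only one handler can be registered; a second one gets -EBUSY */
	return fiq_glue_register_handler(&my_fiq_handler);
}
late_initcall(my_fiq_client_init);

/*
 * Platform idle/suspend exit path, before local_fiq_enable():
 *	fiq_glue_resume();
 */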
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793..6643d6c 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -17,15 +17,17 @@
 #define TRACER_ACCESSED_BIT	0
 #define TRACER_RUNNING_BIT	1
 #define TRACER_CYCLE_ACC_BIT	2
+#define TRACER_TRACE_DATA_BIT	3
 #define TRACER_ACCESSED		BIT(TRACER_ACCESSED_BIT)
 #define TRACER_RUNNING		BIT(TRACER_RUNNING_BIT)
 #define TRACER_CYCLE_ACC	BIT(TRACER_CYCLE_ACC_BIT)
+#define TRACER_TRACE_DATA	BIT(TRACER_TRACE_DATA_BIT)
 
 #define TRACER_TIMEOUT 10000
 
-#define etm_writel(t, v, x) \
-	(__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+#define etm_writel(t, id, v, x) \
+	(__raw_writel((v), (t)->etm_regs[(id)] + (x)))
+#define etm_readl(t, id, x) (__raw_readl((t)->etm_regs[(id)] + (x)))
 
 /* CoreSight Management Registers */
 #define CSMR_LOCKACCESS 0xfb0
@@ -113,11 +115,19 @@
 #define ETMR_TRACEENCTRL	0x24
 #define ETMTE_INCLEXCL		BIT(24)
 #define ETMR_TRACEENEVT		0x20
+
+#define ETMR_VIEWDATAEVT	0x30
+#define ETMR_VIEWDATACTRL1	0x34
+#define ETMR_VIEWDATACTRL2	0x38
+#define ETMR_VIEWDATACTRL3	0x3c
+#define ETMVDC3_EXCLONLY	BIT(16)
+
 #define ETMCTRL_OPTS		(ETMCTRL_DO_CPRT | \
-				ETMCTRL_DATA_DO_ADDR | \
 				ETMCTRL_BRANCH_OUTPUT | \
 				ETMCTRL_DO_CONTEXTID)
 
+#define ETMR_TRACEIDR		0x200
+
 /* ETM management registers, "ETM Architecture", 3.5.24 */
 #define ETMMR_OSLAR	0x300
 #define ETMMR_OSLSR	0x304
@@ -140,14 +150,16 @@
 #define ETBFF_TRIGIN		BIT(8)
 #define ETBFF_TRIGEVT		BIT(9)
 #define ETBFF_TRIGFL		BIT(10)
+#define ETBFF_STOPFL		BIT(12)
 
 #define etb_writel(t, v, x) \
 	(__raw_writel((v), (t)->etb_regs + (x)))
 #define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
 
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
-	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+#define etm_lock(t, id) \
+	do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t, id) \
+	do { etm_writel((t), (id), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
 
 #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etb_unlock(t) \
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644
index 0000000..bca864a
--- /dev/null
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -0,0 +1,28 @@
+/*
+ *  arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+	struct sdio_cis cis;
+	struct sdio_cccr cccr;
+	struct sdio_embedded_func *funcs;
+	int num_funcs;
+};
+
+struct mmc_platform_data {
+	unsigned int ocr_mask;			/* available voltages */
+	int built_in;				/* built-in device flag */
+	int card_present;			/* card detect state */
+	u32 (*translate_vdd)(struct device *, unsigned int);
+	unsigned int (*status)(struct device *);
+	struct embedded_sdio_data *embedded_sdio;
+	int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+};
+
+#endif
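
A board-file sketch of how this structure is typically filled in for a
built-in SDIO device. The names, voltage mask choice, and status hook here
are hypothetical, and the host driver that consumes mmc_platform_data is not
part of this hunk.

#include <linux/device.h>
#include <asm/mach/mmc.h>

/* Hypothetical card-detect hook -- a real board reads a GPIO or register. */
static unsigned int my_wifi_status(struct device *dev)
{
	return 1;	/* the soldered-down SDIO card is always present */
}

static struct embedded_sdio_data my_wifi_emb_data = {
	/* .cis / .cccr / .funcs describe the embedded SDIO function(s) */
};

static struct mmc_platform_data my_wifi_mmc_data = {
	.ocr_mask	= MMC_VDD_28_29,
	.built_in	= 1,
	.status		= my_wifi_status,
	.embedded_sdio	= &my_wifi_emb_data,
};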
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd..2cd0076 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -496,7 +496,7 @@
 	blo	__und_usr_unknown
 3:	ldrht	r0, [r4]
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
-	orr	r0, r0, r5, lsl #16
+	orr    	r0, r0, r5, lsl #16
 #else
 	b	__und_usr_unknown
 #endif
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 1bec8b5..496b8b8 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/sysrq.h>
 #include <linux/device.h>
 #include <linux/clk.h>
@@ -36,26 +37,36 @@
 struct tracectx {
 	unsigned int	etb_bufsz;
 	void __iomem	*etb_regs;
-	void __iomem	*etm_regs;
+	void __iomem	**etm_regs;
+	int		etm_regs_count;
 	unsigned long	flags;
 	int		ncmppairs;
 	int		etm_portsz;
+	u32		etb_fc;
+	unsigned long	range_start;
+	unsigned long	range_end;
+	unsigned long	data_range_start;
+	unsigned long	data_range_end;
+	bool		dump_initial_etb;
 	struct device	*dev;
 	struct clk	*emu_clk;
 	struct mutex	mutex;
 };
 
-static struct tracectx tracer;
+static struct tracectx tracer = {
+	.range_start = (unsigned long)_stext,
+	.range_end = (unsigned long)_etext,
+};
 
 static inline bool trace_isrunning(struct tracectx *t)
 {
 	return !!(t->flags & TRACER_RUNNING);
 }
 
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
 		unsigned long start, unsigned long end, int exclude, int data)
 {
-	u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+	u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
 		    ETMAAT_NOVALCMP;
 
 	if (n < 1 || n > t->ncmppairs)
@@ -71,95 +82,155 @@
 		flags |= ETMAAT_IEXEC;
 
 	/* first comparator for the range */
-	etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
-	etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+	etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+	etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
 
 	/* second comparator is right next to it */
-	etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
-	etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
+	etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+	etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
 
-	flags = exclude ? ETMTE_INCLEXCL : 0;
-	etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+	if (data) {
+		flags = exclude ? ETMVDC3_EXCLONLY : 0;
+		if (exclude)
+			n += 8;
+		etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+	} else {
+		flags = exclude ? ETMTE_INCLEXCL : 0;
+		etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+	}
 
 	return 0;
 }
 
-static int trace_start(struct tracectx *t)
+static int trace_start_etm(struct tracectx *t, int id)
 {
 	u32 v;
 	unsigned long timeout = TRACER_TIMEOUT;
 
-	etb_unlock(t);
-
-	etb_writel(t, 0, ETBR_FORMATTERCTRL);
-	etb_writel(t, 1, ETBR_CTRL);
-
-	etb_lock(t);
-
-	/* configure etm */
 	v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
 
 	if (t->flags & TRACER_CYCLE_ACC)
 		v |= ETMCTRL_CYCLEACCURATE;
 
-	etm_unlock(t);
+	if (t->flags & TRACER_TRACE_DATA)
+		v |= ETMCTRL_DATA_DO_ADDR;
 
-	etm_writel(t, v, ETMR_CTRL);
+	etm_unlock(t, id);
 
-	while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+	etm_writel(t, id, v, ETMR_CTRL);
+
+	while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
 		;
 	if (!timeout) {
 		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-		etm_lock(t);
+		etm_lock(t, id);
 		return -EFAULT;
 	}
 
-	etm_setup_address_range(t, 1, (unsigned long)_stext,
-			(unsigned long)_etext, 0, 0);
-	etm_writel(t, 0, ETMR_TRACEENCTRL2);
-	etm_writel(t, 0, ETMR_TRACESSCTRL);
-	etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+	if (t->range_start || t->range_end)
+		etm_setup_address_range(t, id, 1,
+					t->range_start, t->range_end, 0, 0);
+	else
+		etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+	etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+	etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+	etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+	etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+	etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+	if (t->data_range_start || t->data_range_end)
+		etm_setup_address_range(t, id, 2, t->data_range_start,
+					t->data_range_end, 0, 1);
+	else
+		etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+	etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
 
 	v &= ~ETMCTRL_PROGRAM;
 	v |= ETMCTRL_PORTSEL;
 
-	etm_writel(t, v, ETMR_CTRL);
+	etm_writel(t, id, v, ETMR_CTRL);
 
 	timeout = TRACER_TIMEOUT;
-	while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+	while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
 		;
 	if (!timeout) {
 		dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
-		etm_lock(t);
+		etm_lock(t, id);
 		return -EFAULT;
 	}
 
-	etm_lock(t);
+	etm_lock(t, id);
+	return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+	int ret;
+	int id;
+	u32 etb_fc = t->etb_fc;
+
+	etb_unlock(t);
+
+	t->dump_initial_etb = false;
+	etb_writel(t, 0, ETBR_WRITEADDR);
+	etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+	etb_writel(t, 1, ETBR_CTRL);
+
+	etb_lock(t);
+
+	/* configure etm(s) */
+	for (id = 0; id < t->etm_regs_count; id++) {
+		ret = trace_start_etm(t, id);
+		if (ret)
+			return ret;
+	}
 
 	t->flags |= TRACER_RUNNING;
 
 	return 0;
 }
 
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
 {
 	unsigned long timeout = TRACER_TIMEOUT;
 
-	etm_unlock(t);
+	etm_unlock(t, id);
 
-	etm_writel(t, 0x440, ETMR_CTRL);
-	while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+	etm_writel(t, id, 0x441, ETMR_CTRL);
+	while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
 		;
 	if (!timeout) {
 		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-		etm_lock(t);
+		etm_lock(t, id);
 		return -EFAULT;
 	}
 
-	etm_lock(t);
+	etm_lock(t, id);
+	return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+	int id;
+	int ret;
+	unsigned long timeout = TRACER_TIMEOUT;
+	u32 etb_fc = t->etb_fc;
+
+	for (id = 0; id < t->etm_regs_count; id++) {
+		ret = trace_stop_etm(t, id);
+		if (ret)
+			return ret;
+	}
 
 	etb_unlock(t);
-	etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+	if (etb_fc) {
+		etb_fc |= ETBFF_STOPFL;
+		etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+	}
+	etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
 
 	timeout = TRACER_TIMEOUT;
 	while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -184,24 +255,15 @@
 static int etb_getdatalen(struct tracectx *t)
 {
 	u32 v;
-	int rp, wp;
+	int wp;
 
 	v = etb_readl(t, ETBR_STATUS);
 
 	if (v & 1)
 		return t->etb_bufsz;
 
-	rp = etb_readl(t, ETBR_READADDR);
 	wp = etb_readl(t, ETBR_WRITEADDR);
-
-	if (rp > wp) {
-		etb_writel(t, 0, ETBR_READADDR);
-		etb_writel(t, 0, ETBR_WRITEADDR);
-
-		return 0;
-	}
-
-	return wp - rp;
+	return wp;
 }
 
 /* sysrq+v will always stop the running trace and leave it at that */
@@ -234,21 +296,18 @@
 		printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
 	printk(KERN_INFO "\n--- ETB buffer end ---\n");
 
-	/* deassert the overflow bit */
-	etb_writel(t, 1, ETBR_CTRL);
-	etb_writel(t, 0, ETBR_CTRL);
-
-	etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-	etb_writel(t, 0, ETBR_READADDR);
-	etb_writel(t, 0, ETBR_WRITEADDR);
-
 	etb_lock(t);
 }
 
 static void sysrq_etm_dump(int key)
 {
+	if (!mutex_trylock(&tracer.mutex)) {
+		printk(KERN_INFO "Tracing hardware busy\n");
+		return;
+	}
 	dev_dbg(tracer.dev, "Dumping ETB buffer\n");
 	etm_dump();
+	mutex_unlock(&tracer.mutex);
 }
 
 static struct sysrq_key_op sysrq_etm_op = {
@@ -275,6 +334,10 @@
 	struct tracectx *t = file->private_data;
 	u32 first = 0;
 	u32 *buf;
+	int wpos;
+	int skip;
+	long wlength;
+	loff_t pos = *ppos;
 
 	mutex_lock(&t->mutex);
 
@@ -286,31 +349,39 @@
 	etb_unlock(t);
 
 	total = etb_getdatalen(t);
+	if (total == 0 && t->dump_initial_etb)
+		total = t->etb_bufsz;
 	if (total == t->etb_bufsz)
 		first = etb_readl(t, ETBR_WRITEADDR);
 
+	if (pos > total * 4) {
+		skip = 0;
+		wpos = total;
+	} else {
+		skip = (int)pos % 4;
+		wpos = (int)pos / 4;
+	}
+	total -= wpos;
+	first = (first + wpos) % t->etb_bufsz;
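+	/* e.g. *ppos == 10: skip 2 whole words (wpos) plus 2 bytes (skip) */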
+
 	etb_writel(t, first, ETBR_READADDR);
 
-	length = min(total * 4, (int)len);
-	buf = vmalloc(length);
+	wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+	length = min(total * 4 - skip, (int)len);
+	buf = vmalloc(wlength * 4);
 
-	dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+	dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+		length, pos, wlength, first);
+	dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
 	dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
-	for (i = 0; i < length / 4; i++)
+	for (i = 0; i < wlength; i++)
 		buf[i] = etb_readl(t, ETBR_READMEM);
 
-	/* the only way to deassert overflow bit in ETB status is this */
-	etb_writel(t, 1, ETBR_CTRL);
-	etb_writel(t, 0, ETBR_CTRL);
-
-	etb_writel(t, 0, ETBR_WRITEADDR);
-	etb_writel(t, 0, ETBR_READADDR);
-	etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
 	etb_lock(t);
 
-	length -= copy_to_user(data, buf, length);
+	length -= copy_to_user(data, (u8 *)buf + skip, length);
 	vfree(buf);
+	*ppos = pos + length;
 
 out:
 	mutex_unlock(&t->mutex);
@@ -347,28 +418,17 @@
 	if (ret)
 		goto out;
 
+	mutex_lock(&t->mutex);
 	t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
 	if (!t->etb_regs) {
 		ret = -ENOMEM;
 		goto out_release;
 	}
 
+	t->dev = &dev->dev;
+	t->dump_initial_etb = true;
 	amba_set_drvdata(dev, t);
 
-	etb_miscdev.parent = &dev->dev;
-
-	ret = misc_register(&etb_miscdev);
-	if (ret)
-		goto out_unmap;
-
-	t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
-	if (IS_ERR(t->emu_clk)) {
-		dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
-		return -EFAULT;
-	}
-
-	clk_enable(t->emu_clk);
-
 	etb_unlock(t);
 	t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
 	dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -377,6 +437,20 @@
 	etb_writel(t, 0, ETBR_CTRL);
 	etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
 	etb_lock(t);
+	mutex_unlock(&t->mutex);
+
+	etb_miscdev.parent = &dev->dev;
+
+	ret = misc_register(&etb_miscdev);
+	if (ret)
+		goto out_unmap;
+
+	/* Get optional clock. Currently used to select clock source on omap3 */
+	t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+	if (IS_ERR(t->emu_clk))
+		dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+	else
+		clk_enable(t->emu_clk);
 
 	dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
 
@@ -384,10 +458,13 @@
 	return ret;
 
 out_unmap:
+	mutex_lock(&t->mutex);
 	amba_set_drvdata(dev, NULL);
 	iounmap(t->etb_regs);
+	t->etb_regs = NULL;
 
 out_release:
+	mutex_unlock(&t->mutex);
 	amba_release_regions(dev);
 
 	return ret;
@@ -402,8 +479,10 @@
 	iounmap(t->etb_regs);
 	t->etb_regs = NULL;
 
-	clk_disable(t->emu_clk);
-	clk_put(t->emu_clk);
+	if (!IS_ERR(t->emu_clk)) {
+		clk_disable(t->emu_clk);
+		clk_put(t->emu_clk);
+	}
 
 	amba_release_regions(dev);
 
@@ -447,7 +526,10 @@
 		return -EINVAL;
 
 	mutex_lock(&tracer.mutex);
-	ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+	if (!tracer.etb_regs)
+		ret = -ENODEV;
+	else
+		ret = value ? trace_start(&tracer) : trace_stop(&tracer);
 	mutex_unlock(&tracer.mutex);
 
 	return ret ? : n;
@@ -462,36 +544,50 @@
 {
 	u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
 	int datalen;
+	int id;
+	int ret;
 
-	etb_unlock(&tracer);
-	datalen = etb_getdatalen(&tracer);
-	etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
-	etb_ra = etb_readl(&tracer, ETBR_READADDR);
-	etb_st = etb_readl(&tracer, ETBR_STATUS);
-	etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
-	etb_lock(&tracer);
+	mutex_lock(&tracer.mutex);
+	if (tracer.etb_regs) {
+		etb_unlock(&tracer);
+		datalen = etb_getdatalen(&tracer);
+		etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+		etb_ra = etb_readl(&tracer, ETBR_READADDR);
+		etb_st = etb_readl(&tracer, ETBR_STATUS);
+		etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+		etb_lock(&tracer);
+	} else {
+		etb_wa = etb_ra = etb_st = etb_fc = ~0;
+		datalen = -1;
+	}
 
-	etm_unlock(&tracer);
-	etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
-	etm_st = etm_readl(&tracer, ETMR_STATUS);
-	etm_lock(&tracer);
-
-	return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+	ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
 			"ETBR_WRITEADDR:\t%08x\n"
 			"ETBR_READADDR:\t%08x\n"
 			"ETBR_STATUS:\t%08x\n"
-			"ETBR_FORMATTERCTRL:\t%08x\n"
-			"ETMR_CTRL:\t%08x\n"
-			"ETMR_STATUS:\t%08x\n",
+			"ETBR_FORMATTERCTRL:\t%08x\n",
 			datalen,
 			tracer.ncmppairs,
 			etb_wa,
 			etb_ra,
 			etb_st,
-			etb_fc,
+			etb_fc
+			);
+
+	for (id = 0; id < tracer.etm_regs_count; id++) {
+		etm_unlock(&tracer, id);
+		etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+		etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+		etm_lock(&tracer, id);
+		ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+			"ETMR_STATUS:\t%08x\n",
 			etm_ctrl,
 			etm_st
 			);
+	}
+	mutex_unlock(&tracer.mutex);
+
+	return ret;
 }
 
 static struct kobj_attribute trace_info_attr =
@@ -530,42 +626,121 @@
 static struct kobj_attribute trace_mode_attr =
 	__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
+static ssize_t trace_range_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%08lx %08lx\n",
+			tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned long range_start, range_end;
+
+	if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	tracer.range_start = range_start;
+	tracer.range_end = range_end;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+
+static struct kobj_attribute trace_range_attr =
+	__ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	unsigned long range_start;
+	u64 range_end;
+	mutex_lock(&tracer.mutex);
+	range_start = tracer.data_range_start;
+	range_end = tracer.data_range_end;
+	if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+		range_end = 0x100000000ULL;
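+	/* a zero end with data tracing enabled is reported as the full 4GB range */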
+	mutex_unlock(&tracer.mutex);
+	return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned long range_start;
+	u64 range_end;
+
+	if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	tracer.data_range_start = range_start;
+	tracer.data_range_end = (unsigned long)range_end;
+	if (range_end)
+		tracer.flags |= TRACER_TRACE_DATA;
+	else
+		tracer.flags &= ~TRACER_TRACE_DATA;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+
+static struct kobj_attribute trace_data_range_attr =
+	__ATTR(trace_data_range, 0644,
+		trace_data_range_show, trace_data_range_store);
+
 static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
+	void __iomem **new_regs;
+	int new_count;
 
-	if (t->etm_regs) {
-		dev_dbg(&dev->dev, "ETM already initialized\n");
-		ret = -EBUSY;
+	mutex_lock(&t->mutex);
+	new_count = t->etm_regs_count + 1;
+	new_regs = krealloc(t->etm_regs,
+				sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
+
+	if (!new_regs) {
+		dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+		ret = -ENOMEM;
 		goto out;
 	}
+	t->etm_regs = new_regs;
 
 	ret = amba_request_regions(dev, NULL);
 	if (ret)
 		goto out;
 
-	t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
-	if (!t->etm_regs) {
+	t->etm_regs[t->etm_regs_count] =
+		ioremap_nocache(dev->res.start, resource_size(&dev->res));
+	if (!t->etm_regs[t->etm_regs_count]) {
 		ret = -ENOMEM;
 		goto out_release;
 	}
 
-	amba_set_drvdata(dev, t);
+	amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
 
-	mutex_init(&t->mutex);
-	t->dev = &dev->dev;
-	t->flags = TRACER_CYCLE_ACC;
+	t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA;
 	t->etm_portsz = 1;
 
-	etm_unlock(t);
-	(void)etm_readl(t, ETMMR_PDSR);
+	etm_unlock(t, t->etm_regs_count);
+	(void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
 	/* dummy first read */
-	(void)etm_readl(&tracer, ETMMR_OSSRR);
+	(void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
 
-	t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
-	etm_writel(t, 0x440, ETMR_CTRL);
-	etm_lock(t);
+	t->ncmppairs = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE) & 0xf;
+	etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
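+	/* give each ETM a unique, non-zero trace source id */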
+	etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+	etm_lock(t, t->etm_regs_count);
 
 	ret = sysfs_create_file(&dev->dev.kobj,
 			&trace_running_attr.attr);
@@ -581,35 +756,67 @@
 	if (ret)
 		dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
 
-	dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_data_range_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev,
+			"Failed to create trace_data_range in sysfs\n");
+
+	dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+	/* Enable formatter if there are multiple trace sources */
+	if (new_count > 1)
+		t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+	t->etm_regs_count = new_count;
 
 out:
+	mutex_unlock(&t->mutex);
 	return ret;
 
 out_unmap:
 	amba_set_drvdata(dev, NULL);
-	iounmap(t->etm_regs);
+	iounmap(t->etm_regs[t->etm_regs_count]);
 
 out_release:
 	amba_release_regions(dev);
 
+	mutex_unlock(&t->mutex);
 	return ret;
 }
 
 static int etm_remove(struct amba_device *dev)
 {
-	struct tracectx *t = amba_get_drvdata(dev);
-
-	amba_set_drvdata(dev, NULL);
-
-	iounmap(t->etm_regs);
-	t->etm_regs = NULL;
-
-	amba_release_regions(dev);
+	int i;
+	struct tracectx *t = &tracer;
+	void __iomem	*etm_regs = amba_get_drvdata(dev);
 
 	sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
 	sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
 	sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
+
+	amba_set_drvdata(dev, NULL);
+
+	mutex_lock(&t->mutex);
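+	/* find this ETM's slot and compact the register array over it */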
+	for (i = 0; i < t->etm_regs_count; i++)
+		if (t->etm_regs[i] == etm_regs)
+			break;
+	for (; i < t->etm_regs_count - 1; i++)
+		t->etm_regs[i] = t->etm_regs[i + 1];
+	t->etm_regs_count--;
+	if (!t->etm_regs_count) {
+		kfree(t->etm_regs);
+		t->etm_regs = NULL;
+	}
+	mutex_unlock(&t->mutex);
+
+	iounmap(etm_regs);
+	amba_release_regions(dev);
 
 	return 0;
 }
@@ -619,6 +826,10 @@
 		.id	= 0x0003b921,
 		.mask	= 0x0007ffff,
 	},
+	{
+		.id	= 0x0003b950,
+		.mask	= 0x0007ffff,
+	},
 	{ 0, 0 },
 };
 
@@ -636,6 +847,8 @@
 {
 	int retval;
 
+	mutex_init(&tracer.mutex);
+
 	retval = amba_driver_register(&etb_driver);
 	if (retval) {
 		printk(KERN_ERR "Failed to register etb\n");
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 5e1e541..34ea864 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -251,6 +251,77 @@
 	arm_pm_restart(reboot_mode, cmd);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+	int	i, j;
+	int	nlines;
+	u32	*p;
+
+	/*
+	 * don't attempt to dump non-kernel addresses or
+	 * values that are probably just small negative numbers
+	 */
+	if (addr < PAGE_OFFSET || addr > -256UL)
+		return;
+
+	printk("\n%s: %#lx:\n", name, addr);
+
+	/*
+	 * round address down to a 32 bit boundary
+	 * and always dump a multiple of 32 bytes
+	 */
+	p = (u32 *)(addr & ~(sizeof(u32) - 1));
+	nbytes += (addr & (sizeof(u32) - 1));
+	nlines = (nbytes + 31) / 32;
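+	/*
+	 * e.g. addr = 0xc0000123, nbytes = 128: p = 0xc0000120, nbytes
+	 * becomes 131 and nlines = 5, i.e. five rows of 8 words each.
+	 */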
+
+
+	for (i = 0; i < nlines; i++) {
+		/*
+		 * just display low 16 bits of address to keep
+		 * each line of the dump < 80 characters
+		 */
+		printk("%04lx ", (unsigned long)p & 0xffff);
+		for (j = 0; j < 8; j++) {
+			u32	data;
+			if (probe_kernel_address(p, data)) {
+				printk(" ********");
+			} else {
+				printk(" %08x", data);
+			}
+			++p;
+		}
+		printk("\n");
+	}
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+	mm_segment_t fs;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+	show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+	show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+	show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+	show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+	show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+	show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+	show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+	show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+	show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+	show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+	show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+	show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+	show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+	show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+	show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+	set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -310,6 +381,8 @@
 		printk("Control: %08x%s\n", ctrl, buf);
 	}
 #endif
+
+	show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6807cb1..56b2715 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -451,7 +451,9 @@
 		if (end > vma->vm_end)
 			end = vma->vm_end;
 
-		flush_cache_user_range(vma, start, end);
+		up_read(&mm->mmap_sem);
+		flush_cache_user_range(start, end);
+		return;
 	}
 	up_read(&mm->mmap_sem);
 }
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 73b4a8b..6b5441d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -272,6 +272,11 @@
  *	- end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
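+	@ flush the whole data cache when the range exceeds the configured limit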
+	sub	r2, r1, r0
+	cmp	r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+	bhi	v6_dma_flush_dcache_all
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
 	ldrb	r2, [r0]		@ read for ownership
 	strb	r2, [r0]		@ write for ownership
@@ -294,6 +299,18 @@
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+	mov	r0, #0
+#ifdef HARVARD_CACHE
+	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
+#else
+	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
+#endif
+	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+	mov	pc, lr
+#endif
+
 /*
  *	dma_map_area(start, size, dir)
  *	- start	- kernel virtual start address
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 4fa9903..c1a9784 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -10,7 +10,7 @@
  *
  * Basic entry code, called from the kernel's undefined instruction trap.
  *  r0  = faulted instruction
- *  r5  = faulted PC+4
+ *  r2  = faulted PC+4
  *  r9  = successful return
  *  r10 = thread_info structure
  *  lr  = failure return
@@ -26,6 +26,7 @@
 	str	r11, [r10, #TI_PREEMPT]
 #endif
 	enable_irq
+	str	r2, [sp, #S_PC]		@ update regs->ARM_pc for Thumb 2 case
  	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
 	add	r10, r10, #TI_VFPSTATE	@ r10 = workspace
diff --git a/block/genhd.c b/block/genhd.c
index 3608289..cbf7b88 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1105,6 +1105,22 @@
 	free_part_info(&disk->part0);
 	kfree(disk);
 }
+
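+/* export the partition count to userspace as the NPARTS uevent variable */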
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+	int cnt = 0;
+
+	disk_part_iter_init(&piter, disk, 0);
+	while((part = disk_part_iter_next(&piter)))
+		cnt++;
+	disk_part_iter_exit(&piter);
+	add_uevent_var(env, "NPARTS=%u", cnt);
+	return 0;
+}
+
 struct class block_class = {
 	.name		= "block",
 };
@@ -1123,6 +1139,7 @@
 	.groups		= disk_attr_groups,
 	.release	= disk_release,
 	.devnode	= block_devnode,
+	.uevent		= disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3bb154d..d0258eb 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -96,6 +96,8 @@
 
 source "drivers/nfc/Kconfig"
 
+source "drivers/switch/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 09f3232..4ea4ac9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -98,6 +98,7 @@
 obj-$(CONFIG_MMC)		+= mmc/
 obj-$(CONFIG_MEMSTICK)		+= memstick/
 obj-y				+= leds/
+obj-$(CONFIG_SWITCH)		+= switch/
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 06f09bf..4282d44 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -27,6 +27,7 @@
 #include <linux/sched.h>
 #include <linux/async.h>
 #include <linux/suspend.h>
+#include <linux/timer.h>
 
 #include "../base.h"
 #include "power.h"
@@ -49,6 +50,12 @@
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
+static void dpm_drv_timeout(unsigned long data);
+struct dpm_drv_wd_data {
+	struct device *dev;
+	struct task_struct *tsk;
+};
+
 static int async_error;
 
 /**
@@ -584,6 +591,30 @@
 }
 
 /**
+ *	dpm_drv_timeout - Driver suspend / resume watchdog handler
+ *	@data: struct device which timed out
+ *
+ *	Called when a driver has timed out suspending or resuming.
+ *	There's not much we can do here to recover, so BUG() out for a
+ *	crash dump.
+ *
+ */
+static void dpm_drv_timeout(unsigned long data)
+{
+	struct dpm_drv_wd_data *wd_data = (void *)data;
+	struct device *dev = wd_data->dev;
+	struct task_struct *tsk = wd_data->tsk;
+
+	printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
+	       (dev->driver ? dev->driver->name : "no driver"));
+
+	printk(KERN_EMERG "dpm suspend stack:\n");
+	show_stack(tsk, NULL);
+
+	BUG();
+}
+
+/**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
  *
@@ -841,8 +872,19 @@
 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 {
 	int error = 0;
+	struct timer_list timer;
+	struct dpm_drv_wd_data data;
 
 	dpm_wait_for_children(dev, async);
+
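+	/* watchdog: dpm_drv_timeout() fires if this device takes over 12s */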
+	data.dev = dev;
+	data.tsk = get_current();
+	init_timer_on_stack(&timer);
+	timer.expires = jiffies + HZ * 12;
+	timer.function = dpm_drv_timeout;
+	timer.data = (unsigned long)&data;
+	add_timer(&timer);
+
 	device_lock(dev);
 
 	if (async_error)
@@ -892,6 +934,10 @@
 
  Unlock:
 	device_unlock(dev);
+
+	del_timer_sync(&timer);
+	destroy_timer_on_stack(&timer);
+
 	complete_all(&dev->power.completion);
 
 	if (error)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bacef3..a585473 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -375,6 +375,11 @@
 
 	/* load patch and sysconfig files for AR3012 */
 	if (id->driver_info & BTUSB_ATH3012) {
+
+		/* New firmware with patch and sysconfig files already loaded */
+		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001)
+			return -ENODEV;
+
 		ret = ath3k_load_patch(udev);
 		if (ret < 0) {
 			BT_ERR("Loading patch file failed");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c2de895..91d13a9 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -54,6 +54,7 @@
 #define BTUSB_BCM92035		0x10
 #define BTUSB_BROKEN_ISOC	0x20
 #define BTUSB_WRONG_SCO_MTU	0x40
+#define BTUSB_ATH3012		0x80
 
 static struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
@@ -110,7 +111,7 @@
 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
 	/* Atheros 3012 with sflash firmware */
-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
+	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -914,6 +915,15 @@
 	if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
 		return -ENODEV;
 
+	if (id->driver_info & BTUSB_ATH3012) {
+		struct usb_device *udev = interface_to_usbdev(intf);
+
+		/* Old firmware would otherwise let ath3k driver load
+		 * patch and sysconfig files */
+		if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
+			return -ENODEV;
+	}
+
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 49502bc..7d10ae3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,19 @@
 
 source "drivers/tty/Kconfig"
 
+config DEVMEM
+	bool "Memory device driver"
+	default y
+	help
+	  The memory driver provides two character devices, mem and kmem, which
+	  provide access to the system's memory. The mem device is a view of
+	  physical memory, and each byte in the device corresponds to the
+	  matching physical address. The kmem device is the same as mem, but
+	  the addresses correspond to the kernel's virtual address space rather
+	  than physical memory. These devices are standard parts of a Linux
+	  system and most users should say Y here. You might say N if you
+	  are very security conscious or memory is tight.
+
 config DEVKMEM
 	bool "/dev/kmem virtual device support"
 	default y
@@ -598,6 +611,10 @@
 	depends on ISA || PCI
 	default y
 
+config DCC_TTY
+	tristate "DCC tty driver"
+	depends on ARM
+
 source "drivers/s390/char/Kconfig"
 
 config RAMOOPS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7a00672..3f63254 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -58,6 +58,7 @@
 obj-$(CONFIG_HANGCHECK_TIMER)	+= hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM)		+= tpm/
 
+obj-$(CONFIG_DCC_TTY)		+= dcc_tty.o
 obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
 obj-$(CONFIG_RAMOOPS)		+= ramoops.o
 
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644
index 0000000..a787acc
--- /dev/null
+++ b/drivers/char/dcc_tty.c
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static spinlock_t g_dcc_tty_lock = SPIN_LOCK_UNLOCKED;
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+	char ch;
+	int rch;
+	int written;
+
+	while (g_dcc_buffer_count) {
+		ch = g_dcc_buffer[g_dcc_buffer_head];
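+		/*
+		 * mrc with r15 as the destination copies the DCC status
+		 * flags into the CPSR; the byte is only written to the
+		 * DCC data register when the TX-full flag (carry) is clear.
+		 */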
+		asm(
+			"mrc 14, 0, r15, c0, c1, 0\n"
+			"mcrcc 14, 0, %1, c0, c5, 0\n"
+			"movcc %0, #1\n"
+			"movcs %0, #0\n"
+			: "=r" (written)
+			: "r" (ch)
+		);
+		if (written) {
+			if (ch == '\n')
+				g_dcc_buffer[g_dcc_buffer_head] = '\r';
+			else {
+				g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+				g_dcc_buffer_count--;
+				if (g_dcc_tty)
+					tty_wakeup(g_dcc_tty);
+			}
+			g_dcc_write_delay_usecs = 1;
+		} else {
+			if (g_dcc_write_delay_usecs > 0x100)
+				break;
+			g_dcc_write_delay_usecs <<= 1;
+			udelay(g_dcc_write_delay_usecs);
+		}
+	}
+
+	if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+		asm(
+			"mrc 14, 0, %0, c0, c1, 0\n"
+			"tst %0, #(1 << 30)\n"
+			"moveq %0, #-1\n"
+			"mrcne 14, 0, %0, c0, c5, 0\n"
+			: "=r" (rch)
+		);
+		if (rch >= 0) {
+			ch = rch;
+			tty_insert_flip_string(g_dcc_tty, &ch, 1);
+			tty_flip_buffer_push(g_dcc_tty);
+		}
+	}
+
+
+	if (g_dcc_buffer_count)
+		hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+	else
+		hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+	int ret;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+		g_dcc_tty = tty;
+		g_dcc_tty_open_count++;
+		ret = 0;
+	} else
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+	printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+	return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+	printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+	if (g_dcc_tty == tty) {
+		if (--g_dcc_tty_open_count == 0)
+			g_dcc_tty = NULL;
+	}
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+	const unsigned char *buf = buf_start;
+	unsigned long irq_flags;
+	int copy_len;
+	int space_left;
+	int tail;
+
+	if (count < 1)
+		return 0;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	do {
+		tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+		copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+		space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+		if (copy_len > space_left)
+			copy_len = space_left;
+		if (copy_len > count)
+			copy_len = count;
+		memcpy(&g_dcc_buffer[tail], buf, copy_len);
+		g_dcc_buffer_count += copy_len;
+		buf += copy_len;
+		count -= copy_len;
+		if (copy_len < count && copy_len < space_left) {
+			space_left -= copy_len;
+			copy_len = count;
+			if (copy_len > space_left) {
+				copy_len = space_left;
+			}
+			memcpy(g_dcc_buffer, buf, copy_len);
+			buf += copy_len;
+			count -= copy_len;
+			g_dcc_buffer_count += copy_len;
+		}
+		dcc_poll_locked();
+		space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+	} while(count && space_left);
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+	return buf - buf_start;
+}
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+	int ret;
+	/* printk("dcc_tty_write %p, %d\n", buf, count); */
+	ret = dcc_write(buf, count);
+	if (ret != count)
+		printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+	return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+	int space_left;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+	return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	int ret;
+	asm(
+		"mrc 14, 0, %0, c0, c1, 0\n"
+		"mov %0, %0, LSR #30\n"
+		"and %0, %0, #1\n"
+		: "=r" (ret)
+	);
+	return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	dcc_poll_locked();
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	dcc_poll_locked();
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+	return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+	dcc_write(b, count);
+#else
+	/* blocking printk */
+	while (count > 0) {
+		int written;
+		written = dcc_write(b, count);
+		if (written) {
+			b += written;
+			count -= written;
+		}
+	}
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+	*index = 0;
+	return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+	if (co->index != 0)
+		return -ENODEV;
+	return 0;
+}
+
+
+static struct console dcc_console =
+{
+	.name		= "ttyDCC",
+	.write		= dcc_console_write,
+	.device		= dcc_console_device,
+	.setup		= dcc_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+};
+
+static struct tty_operations dcc_tty_ops = {
+	.open = dcc_tty_open,
+	.close = dcc_tty_close,
+	.write = dcc_tty_write,
+	.write_room = dcc_tty_write_room,
+	.chars_in_buffer = dcc_tty_chars_in_buffer,
+	.unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+	int ret;
+
+	hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	g_dcc_timer.function = dcc_tty_timer_func;
+
+	g_dcc_tty_driver = alloc_tty_driver(1);
+	if (!g_dcc_tty_driver) {
+		printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+		ret = -ENOMEM;
+		goto err_alloc_tty_driver_failed;
+	}
+	g_dcc_tty_driver->owner = THIS_MODULE;
+	g_dcc_tty_driver->driver_name = "dcc";
+	g_dcc_tty_driver->name = "ttyDCC";
+	g_dcc_tty_driver->major = 0; // auto assign
+	g_dcc_tty_driver->minor_start = 0;
+	g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	g_dcc_tty_driver->init_termios = tty_std_termios;
+	g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+	ret = tty_register_driver(g_dcc_tty_driver);
+	if (ret) {
+		printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+		goto err_tty_register_driver_failed;
+	}
+	tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+	register_console(&dcc_console);
+	hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+	return 0;
+
+err_tty_register_driver_failed:
+	put_tty_driver(g_dcc_tty_driver);
+	g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+	return ret;
+}
+
+static void  __exit dcc_tty_exit(void)
+{
+	int ret;
+
+	tty_unregister_device(g_dcc_tty_driver, 0);
+	ret = tty_unregister_driver(g_dcc_tty_driver);
+	if (ret < 0) {
+		printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+	} else {
+		put_tty_driver(g_dcc_tty_driver);
+	}
+	g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
+
+
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 8fc04b4..9b1eb18 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -56,6 +56,7 @@
 }
 #endif
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 #ifdef CONFIG_STRICT_DEVMEM
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
@@ -81,7 +82,9 @@
 	return 1;
 }
 #endif
+#endif
 
+#ifdef CONFIG_DEVMEM
 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 {
 }
@@ -208,6 +211,9 @@
 	*ppos += written;
 	return written;
 }
+#endif	/* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 
 int __weak phys_mem_access_prot_allowed(struct file *file,
 	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -329,6 +335,7 @@
 	}
 	return 0;
 }
+#endif	/* CONFIG_DEVMEM */
 
 #ifdef CONFIG_DEVKMEM
 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -693,6 +700,8 @@
 	return file->f_pos = 0;
 }
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
 /*
  * The memory devices use the full 32/64 bits of the offset, and so we cannot
  * check against negative addresses: they are ok. The return value is weird,
@@ -726,10 +735,14 @@
 	return ret;
 }
 
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
 static int open_port(struct inode * inode, struct file * filp)
 {
 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
+#endif
 
 #define zero_lseek	null_lseek
 #define full_lseek      null_lseek
@@ -739,6 +752,7 @@
 #define open_kmem	open_mem
 #define open_oldmem	open_mem
 
+#ifdef CONFIG_DEVMEM
 static const struct file_operations mem_fops = {
 	.llseek		= memory_lseek,
 	.read		= read_mem,
@@ -747,6 +761,7 @@
 	.open		= open_mem,
 	.get_unmapped_area = get_unmapped_area_mem,
 };
+#endif
 
 #ifdef CONFIG_DEVKMEM
 static const struct file_operations kmem_fops = {
@@ -850,7 +865,9 @@
 	const struct file_operations *fops;
 	struct backing_dev_info *dev_info;
 } devlist[] = {
+#ifdef CONFIG_DEVMEM
 	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
 #ifdef CONFIG_DEVKMEM
 	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
 #endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 9fb8485..20facb8 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -99,6 +99,16 @@
 	  Be aware that not all cpufreq drivers support the conservative
 	  governor. If unsure have a look at the help section of the
 	  driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+	bool "interactive"
+	select CPU_FREQ_GOV_INTERACTIVE
+	help
+	  Use the CPUFreq governor 'interactive' as default. This gives
+	  you a fully dynamic CPU frequency capable system by simply
+	  loading your cpufreq low-level hardware driver and using the
+	  'interactive' governor for latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -156,6 +166,12 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	tristate "'interactive' cpufreq policy governor"
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index e2fc2d2..c044060 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
 
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644
index 0000000..bcbb7ac
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -0,0 +1,723 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+
+#include <asm/cputime.h>
+
+static void (*pm_idle_old)(void);
+static atomic_t active_count = ATOMIC_INIT(0);
+
+struct cpufreq_interactive_cpuinfo {
+	struct timer_list cpu_timer;
+	int timer_idlecancel;
+	u64 time_in_idle;
+	u64 idle_exit_time;
+	u64 timer_run_time;
+	int idling;
+	u64 freq_change_time;
+	u64 freq_change_time_in_idle;
+	struct cpufreq_policy *policy;
+	struct cpufreq_frequency_table *freq_table;
+	unsigned int target_freq;
+	int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* Realtime kthread handles scaling up; workqueue handles scaling down */
+static struct task_struct *up_task;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_down_work;
+static cpumask_t up_cpumask;
+static spinlock_t up_cpumask_lock;
+static cpumask_t down_cpumask;
+static spinlock_t down_cpumask_lock;
+
+/* Go to max speed when CPU load at or above this value. */
+#define DEFAULT_GO_MAXSPEED_LOAD 85
+static unsigned long go_maxspeed_load;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ */
+#define DEFAULT_MIN_SAMPLE_TIME 80000
+static unsigned long min_sample_time;
+
+#define DEBUG 0
+#define BUFSZ 128
+
+#if DEBUG
+#include <linux/proc_fs.h>
+
+struct dbgln {
+	int cpu;
+	unsigned long jiffy;
+	unsigned long run;
+	char buf[BUFSZ];
+};
+
+#define NDBGLNS 256
+
+static struct dbgln dbgbuf[NDBGLNS];
+static int dbgbufs;
+static int dbgbufe;
+static struct proc_dir_entry	*dbg_proc;
+static spinlock_t dbgpr_lock;
+
+static u64 up_request_time;
+static unsigned int up_max_latency;
+
+static void dbgpr(char *fmt, ...)
+{
+	va_list args;
+	int n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbgpr_lock, flags);
+	n = dbgbufe;
+	va_start(args, fmt);
+	vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
+	va_end(args);
+	dbgbuf[n].cpu = smp_processor_id();
+	dbgbuf[n].run = nr_running();
+	dbgbuf[n].jiffy = jiffies;
+
+	if (++dbgbufe >= NDBGLNS)
+		dbgbufe = 0;
+
+	if (dbgbufe == dbgbufs)
+		if (++dbgbufs >= NDBGLNS)
+			dbgbufs = 0;
+
+	spin_unlock_irqrestore(&dbgpr_lock, flags);
+}
+
+static void dbgdump(void)
+{
+	int i, j;
+	unsigned long flags;
+	static struct dbgln prbuf[NDBGLNS];
+
+	spin_lock_irqsave(&dbgpr_lock, flags);
+	i = dbgbufs;
+	j = dbgbufe;
+	memcpy(prbuf, dbgbuf, sizeof(dbgbuf));
+	dbgbufs = 0;
+	dbgbufe = 0;
+	spin_unlock_irqrestore(&dbgpr_lock, flags);
+
+	while (i != j)
+	{
+		printk("%lu %d %lu %s",
+		       prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run,
+		       prbuf[i].buf);
+		if (++i == NDBGLNS)
+			i = 0;
+	}
+}
+
+static int dbg_proc_read(char *buffer, char **start, off_t offset,
+			       int count, int *peof, void *dat)
+{
+	printk("max up_task latency=%uus\n", up_max_latency);
+	dbgdump();
+	*peof = 1;
+	return 0;
+}
+
+
+#else
+#define dbgpr(...) do {} while (0)
+#endif
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+		unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+	.name = "interactive",
+	.governor = cpufreq_governor_interactive,
+	.max_transition_latency = 10000000,
+	.owner = THIS_MODULE,
+};
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+	unsigned int delta_idle;
+	unsigned int delta_time;
+	int cpu_load;
+	int load_since_change;
+	u64 time_in_idle;
+	u64 idle_exit_time;
+	struct cpufreq_interactive_cpuinfo *pcpu =
+		&per_cpu(cpuinfo, data);
+	u64 now_idle;
+	unsigned int new_freq;
+	unsigned int index;
+	unsigned long flags;
+
+	smp_rmb();
+
+	if (!pcpu->governor_enabled)
+		goto exit;
+
+	/*
+	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
+	 * this lets idle exit know the current idle time sample has
+	 * been processed, and idle exit can generate a new sample and
+	 * re-arm the timer.  This prevents a concurrent idle
+	 * exit on that CPU from writing a new set of info at the same time
+	 * the timer function runs (the timer function can't use that info
+	 * until more time passes).
+	 */
+	time_in_idle = pcpu->time_in_idle;
+	idle_exit_time = pcpu->idle_exit_time;
+	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
+	smp_wmb();
+
+	/* If we raced with cancelling a timer, skip. */
+	if (!idle_exit_time) {
+		dbgpr("timer %d: no valid idle exit sample\n", (int) data);
+		goto exit;
+	}
+
+#if DEBUG
+	if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
+		dbgpr("timer %d: late by %d ticks\n",
+		      (int) data, jiffies - pcpu->cpu_timer.expires);
+#endif
+
+	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
+	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
+						  idle_exit_time);
+
+	/*
+	 * If timer ran less than 1ms after short-term sample started, retry.
+	 */
+	if (delta_time < 1000) {
+		dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data,
+		      delta_time, idle_exit_time, pcpu->timer_run_time);
+		goto rearm;
+	}
+
+	if (delta_idle > delta_time)
+		cpu_load = 0;
+	else
+		cpu_load = 100 * (delta_time - delta_idle) / delta_time;
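+	/* e.g. delta_time = 20000us, delta_idle = 5000us -> cpu_load = 75 */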
+
+	delta_idle = (unsigned int) cputime64_sub(now_idle,
+						 pcpu->freq_change_time_in_idle);
+	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
+						  pcpu->freq_change_time);
+
+	if (delta_idle > delta_time)
+		load_since_change = 0;
+	else
+		load_since_change =
+			100 * (delta_time - delta_idle) / delta_time;
+
+	/*
+	 * Choose greater of short-term load (since last idle timer
+	 * started or timer function re-armed itself) or long-term load
+	 * (since last frequency change).
+	 */
+	if (load_since_change > cpu_load)
+		cpu_load = load_since_change;
+
+	if (cpu_load >= go_maxspeed_load)
+		new_freq = pcpu->policy->max;
+	else
+		new_freq = pcpu->policy->max * cpu_load / 100;
+
+	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+					   new_freq, CPUFREQ_RELATION_H,
+					   &index)) {
+		dbgpr("timer %d: cpufreq_frequency_table_target error\n", (int) data);
+		goto rearm;
+	}
+
+	new_freq = pcpu->freq_table[index].frequency;
+
+	if (pcpu->target_freq == new_freq)
+	{
+		dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq);
+		goto rearm_if_notmax;
+	}
+
+	/*
+	 * Do not scale down unless we have been at this frequency for the
+	 * minimum sample time.
+	 */
+	if (new_freq < pcpu->target_freq) {
+		if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
+		    min_sample_time) {
+			dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
+			goto rearm;
+		}
+	}
+
+	dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
+
+	if (new_freq < pcpu->target_freq) {
+		pcpu->target_freq = new_freq;
+		spin_lock_irqsave(&down_cpumask_lock, flags);
+		cpumask_set_cpu(data, &down_cpumask);
+		spin_unlock_irqrestore(&down_cpumask_lock, flags);
+		queue_work(down_wq, &freq_scale_down_work);
+	} else {
+		pcpu->target_freq = new_freq;
+#if DEBUG
+		up_request_time = ktime_to_us(ktime_get());
+#endif
+		spin_lock_irqsave(&up_cpumask_lock, flags);
+		cpumask_set_cpu(data, &up_cpumask);
+		spin_unlock_irqrestore(&up_cpumask_lock, flags);
+		wake_up_process(up_task);
+	}
+
+rearm_if_notmax:
+	/*
+	 * Already set max speed and don't see a need to change that,
+	 * wait until next idle to re-evaluate, don't need timer.
+	 */
+	if (pcpu->target_freq == pcpu->policy->max)
+		goto exit;
+
+rearm:
+	if (!timer_pending(&pcpu->cpu_timer)) {
+		/*
+		 * If already at min: if that CPU is idle, don't set timer.
+		 * Else cancel the timer if that CPU goes idle.  We don't
+		 * need to re-evaluate speed until the next idle exit.
+		 */
+		if (pcpu->target_freq == pcpu->policy->min) {
+			smp_rmb();
+
+			if (pcpu->idling) {
+				dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data);
+				goto exit;
+			}
+
+			pcpu->timer_idlecancel = 1;
+		}
+
+		pcpu->time_in_idle = get_cpu_idle_time_us(
+			data, &pcpu->idle_exit_time);
+		mod_timer(&pcpu->cpu_timer, jiffies + 2);
+		dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time);
+	}
+
+exit:
+	return;
+}
+
+static void cpufreq_interactive_idle(void)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu =
+		&per_cpu(cpuinfo, smp_processor_id());
+	int pending;
+
+	if (!pcpu->governor_enabled) {
+		pm_idle_old();
+		return;
+	}
+
+	pcpu->idling = 1;
+	smp_wmb();
+	pending = timer_pending(&pcpu->cpu_timer);
+
+	if (pcpu->target_freq != pcpu->policy->min) {
+#ifdef CONFIG_SMP
+		/*
+		 * Entering idle while not at lowest speed.  On some
+		 * platforms this can hold the other CPU(s) at that speed
+		 * even though the CPU is idle. Set a timer to re-evaluate
+		 * speed so this idle CPU doesn't hold the other CPUs above
+		 * min indefinitely.  This should probably be a quirk of
+		 * the CPUFreq driver.
+		 */
+		if (!pending) {
+			pcpu->time_in_idle = get_cpu_idle_time_us(
+				smp_processor_id(), &pcpu->idle_exit_time);
+			pcpu->timer_idlecancel = 0;
+			mod_timer(&pcpu->cpu_timer, jiffies + 2);
+			dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n",
+			      pcpu->target_freq, pcpu->cpu_timer.expires,
+			      pcpu->idle_exit_time);
+		}
+#endif
+	} else {
+		/*
+		 * If at min speed and entering idle after load has
+		 * already been evaluated, and a timer has been set just in
+		 * case the CPU suddenly goes busy, cancel that timer.  The
+		 * CPU didn't go busy; we'll recheck things upon idle exit.
+		 */
+		if (pending && pcpu->timer_idlecancel) {
+			dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires);
+			del_timer(&pcpu->cpu_timer);
+			/*
+			 * Ensure last timer run time is after current idle
+			 * sample start time, so next idle exit will always
+			 * start a new idle sampling period.
+			 */
+			pcpu->idle_exit_time = 0;
+			pcpu->timer_idlecancel = 0;
+		}
+	}
+
+	pm_idle_old();
+	pcpu->idling = 0;
+	smp_wmb();
+
+	/*
+	 * Arm the timer for 1-2 ticks later if not already, and if the timer
+	 * function has already processed the previous load sampling
+	 * interval.  (If the timer is not pending but has not processed
+	 * the previous interval, it is probably racing with us on another
+	 * CPU.  Let it compute load based on the previous sample and then
+	 * re-arm the timer for another interval when it's done, rather
+	 * than updating the interval start time to be "now", which doesn't
+	 * give the timer function enough time to make a decision on this
+	 * run.)
+	 */
+	if (timer_pending(&pcpu->cpu_timer) == 0 &&
+	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
+	    pcpu->governor_enabled) {
+		pcpu->time_in_idle =
+			get_cpu_idle_time_us(smp_processor_id(),
+					     &pcpu->idle_exit_time);
+		pcpu->timer_idlecancel = 0;
+		mod_timer(&pcpu->cpu_timer, jiffies + 2);
+		dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time);
+#if DEBUG
+	} else if (timer_pending(&pcpu->cpu_timer) == 0 &&
+		   pcpu->timer_run_time < pcpu->idle_exit_time) {
+		dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n",
+		      pcpu->idle_exit_time, pcpu->timer_run_time);
+#endif
+	}
+
+}
+
+static int cpufreq_interactive_up_task(void *data)
+{
+	unsigned int cpu;
+	cpumask_t tmp_mask;
+	unsigned long flags;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+
+#if DEBUG
+	u64 now;
+	u64 then;
+	unsigned int lat;
+#endif
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&up_cpumask_lock, flags);
+
+		if (cpumask_empty(&up_cpumask)) {
+			spin_unlock_irqrestore(&up_cpumask_lock, flags);
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+
+			spin_lock_irqsave(&up_cpumask_lock, flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+
+#if DEBUG
+		then = up_request_time;
+		now = ktime_to_us(ktime_get());
+
+		if (now > then) {
+			lat = ktime_to_us(ktime_get()) - then;
+
+			if (lat > up_max_latency)
+				up_max_latency = lat;
+		}
+#endif
+
+		tmp_mask = up_cpumask;
+		cpumask_clear(&up_cpumask);
+		spin_unlock_irqrestore(&up_cpumask_lock, flags);
+
+		for_each_cpu(cpu, &tmp_mask) {
+			pcpu = &per_cpu(cpuinfo, cpu);
+
+			if (nr_running() == 1) {
+				dbgpr("up %d: tgt=%d nothing else running\n", cpu,
+				      pcpu->target_freq);
+			}
+
+			smp_rmb();
+
+			if (!pcpu->governor_enabled)
+				continue;
+
+			__cpufreq_driver_target(pcpu->policy,
+						pcpu->target_freq,
+						CPUFREQ_RELATION_H);
+			pcpu->freq_change_time_in_idle =
+				get_cpu_idle_time_us(cpu,
+						     &pcpu->freq_change_time);
+			dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
+		}
+	}
+
+	return 0;
+}
+
+static void cpufreq_interactive_freq_down(struct work_struct *work)
+{
+	unsigned int cpu;
+	cpumask_t tmp_mask;
+	unsigned long flags;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+
+	spin_lock_irqsave(&down_cpumask_lock, flags);
+	tmp_mask = down_cpumask;
+	cpumask_clear(&down_cpumask);
+	spin_unlock_irqrestore(&down_cpumask_lock, flags);
+
+	for_each_cpu(cpu, &tmp_mask) {
+		pcpu = &per_cpu(cpuinfo, cpu);
+
+		smp_rmb();
+
+		if (!pcpu->governor_enabled)
+			continue;
+
+		__cpufreq_driver_target(pcpu->policy,
+					pcpu->target_freq,
+					CPUFREQ_RELATION_H);
+		pcpu->freq_change_time_in_idle =
+			get_cpu_idle_time_us(cpu,
+					     &pcpu->freq_change_time);
+		dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
+	}
+}
+
+static ssize_t show_go_maxspeed_load(struct kobject *kobj,
+				     struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", go_maxspeed_load);
+}
+
+static ssize_t store_go_maxspeed_load(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return strict_strtoul(buf, 0, &go_maxspeed_load);
+}
+
+static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
+		show_go_maxspeed_load, store_go_maxspeed_load);
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return strict_strtoul(buf, 0, &min_sample_time);
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+		show_min_sample_time, store_min_sample_time);
+
+static struct attribute *interactive_attributes[] = {
+	&go_maxspeed_load_attr.attr,
+	&min_sample_time_attr.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group = {
+	.attrs = interactive_attributes,
+	.name = "interactive",
+};
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+		unsigned int event)
+{
+	int rc;
+	unsigned int j;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_frequency_table *freq_table;
+
+	switch (event) {
+	case CPUFREQ_GOV_START:
+		if (!cpu_online(policy->cpu))
+			return -EINVAL;
+
+		freq_table =
+			cpufreq_frequency_get_table(policy->cpu);
+
+		for_each_cpu(j, policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, j);
+			pcpu->policy = policy;
+			pcpu->target_freq = policy->cur;
+			pcpu->freq_table = freq_table;
+			pcpu->freq_change_time_in_idle =
+				get_cpu_idle_time_us(j,
+					     &pcpu->freq_change_time);
+			pcpu->governor_enabled = 1;
+			smp_wmb();
+		}
+
+		/*
+		 * Do not register the idle hook and create sysfs
+		 * entries if we have already done so.
+		 */
+		if (atomic_inc_return(&active_count) > 1)
+			return 0;
+
+		rc = sysfs_create_group(cpufreq_global_kobject,
+				&interactive_attr_group);
+		if (rc)
+			return rc;
+
+		pm_idle_old = pm_idle;
+		pm_idle = cpufreq_interactive_idle;
+		break;
+
+	case CPUFREQ_GOV_STOP:
+		for_each_cpu(j, policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, j);
+			pcpu->governor_enabled = 0;
+			smp_wmb();
+			del_timer_sync(&pcpu->cpu_timer);
+
+			/*
+			 * Reset idle exit time since we may cancel the timer
+			 * before it can run after the last idle exit time,
+			 * to avoid tripping the check in idle exit for a timer
+			 * that is trying to run.
+			 */
+			pcpu->idle_exit_time = 0;
+		}
+
+		flush_work(&freq_scale_down_work);
+		if (atomic_dec_return(&active_count) > 0)
+			return 0;
+
+		sysfs_remove_group(cpufreq_global_kobject,
+				&interactive_attr_group);
+
+		pm_idle = pm_idle_old;
+		break;
+
+	case CPUFREQ_GOV_LIMITS:
+		if (policy->max < policy->cur)
+			__cpufreq_driver_target(policy,
+					policy->max, CPUFREQ_RELATION_H);
+		else if (policy->min > policy->cur)
+			__cpufreq_driver_target(policy,
+					policy->min, CPUFREQ_RELATION_L);
+		break;
+	}
+	return 0;
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+	unsigned int i;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+	go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
+	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+
+	/* Initialize per-cpu timers */
+	for_each_possible_cpu(i) {
+		pcpu = &per_cpu(cpuinfo, i);
+		init_timer(&pcpu->cpu_timer);
+		pcpu->cpu_timer.function = cpufreq_interactive_timer;
+		pcpu->cpu_timer.data = i;
+	}
+
+	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
+				 "kinteractiveup");
+	if (IS_ERR(up_task))
+		return PTR_ERR(up_task);
+
+	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
+	get_task_struct(up_task);
+
+	/* No rescuer thread, bind to CPU queuing the work for possibly
+	   warm cache (probably doesn't matter much). */
+	down_wq = alloc_workqueue("knteractive_down", 0, 1);
+
+	if (!down_wq)
+		goto err_freeuptask;
+
+	INIT_WORK(&freq_scale_down_work,
+		  cpufreq_interactive_freq_down);
+
+	spin_lock_init(&up_cpumask_lock);
+	spin_lock_init(&down_cpumask_lock);
+
+#if DEBUG
+	spin_lock_init(&dbgpr_lock);
+	dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL);
+	dbg_proc->read_proc = dbg_proc_read;
+#endif
+
+	return cpufreq_register_governor(&cpufreq_gov_interactive);
+
+err_freeuptask:
+	put_task_struct(up_task);
+	return -ENOMEM;
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+	cpufreq_unregister_governor(&cpufreq_gov_interactive);
+	kthread_stop(up_task);
+	put_task_struct(up_task);
+	destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+	"Latency sensitive workloads");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index faf7c52..c315ec9 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -317,6 +317,27 @@
 	return 0;
 }
 
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
+	struct cpufreq_policy *policy;
+	struct cpufreq_frequency_table *table;
+	int ret = -ENODEV;
+
+	policy = cpufreq_cpu_get(cpu);
+	if (!policy)
+		return -ENODEV;
+
+	table = cpufreq_frequency_get_table(cpu);
+	if (!table)
+		goto out;
+
+	ret = cpufreq_stats_create_table(policy, table);
+
+out:
+	cpufreq_cpu_put(policy);
+	return ret;
+}
+
 static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
@@ -335,6 +356,10 @@
 	case CPU_DEAD_FROZEN:
 		cpufreq_stats_free_table(cpu);
 		break;
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		cpufreq_stats_create_table_cpu(cpu);
+		break;
 	}
 	return NOTIFY_OK;
 }
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc92778..ca2d3b3 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y			+= drm/ vga/ stub/
+obj-y			+= drm/ vga/ stub/ ion/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
new file mode 100644
index 0000000..5b48b4e
--- /dev/null
+++ b/drivers/gpu/ion/Kconfig
@@ -0,0 +1,12 @@
+menuconfig ION
+	tristate "Ion Memory Manager"
+	select GENERIC_ALLOCATOR
+	help
+	  Choose this option to enable the ION Memory Manager.
+
+config ION_TEGRA
+	tristate "Ion for Tegra"
+	depends on ARCH_TEGRA && ION
+	help
+	  Choose this option if you wish to use ion on an nVidia Tegra.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
new file mode 100644
index 0000000..73fe3fa
--- /dev/null
+++ b/drivers/gpu/ion/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644
index 0000000..9cb5b25
--- /dev/null
+++ b/drivers/gpu/ion/ion.c
@@ -0,0 +1,1184 @@
+/*
+ * drivers/gpu/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/ion.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "ion_priv.h"
+#define DEBUG
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:	an rb tree of all the existing buffers
+ * @lock:		lock protecting the buffers & heaps trees
+ * @heaps:		list of all the heaps in the system
+ * @user_clients:	list of all the clients created from userspace
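+ * @kernel_clients:	list of all the clients created from the kernel
+ * @custom_ioctl:	arch/platform specific ioctl hook, may be NULL
+ * @debug_root:		debugfs root directory for the device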
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	struct mutex lock;
+	struct rb_root heaps;
+	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+			      unsigned long arg);
+	struct rb_root user_clients;
+	struct rb_root kernel_clients;
+	struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @ref:		for reference counting the client
+ * @node:		node in the tree of all clients
+ * @dev:		backpointer to ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @lock:		lock protecting the tree of handles
+ * @heap_mask:		mask of all supported heaps
+ * @name:		used for debugging
+ * @task:		used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+	struct kref ref;
+	struct rb_node node;
+	struct ion_device *dev;
+	struct rb_root handles;
+	struct mutex lock;
+	unsigned int heap_mask;
+	const char *name;
+	struct task_struct *task;
+	pid_t pid;
+	struct dentry *debug_root;
+};
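+
+/*
+ * Illustrative in-kernel usage (a hedged sketch, not part of the original
+ * patch): a driver would typically create a client against the ion device,
+ * allocate from one of the heaps selected by the flags mask, map the buffer,
+ * and release everything in reverse order:
+ *
+ *	struct ion_client *client;
+ *	struct ion_handle *handle;
+ *	void *vaddr;
+ *
+ *	client = ion_client_create(idev, heap_mask, "my-driver");
+ *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE, heap_id_mask);
+ *	vaddr = ion_map_kernel(client, handle);
+ *	...
+ *	ion_unmap_kernel(client, handle);
+ *	ion_free(client, handle);
+ *	ion_client_destroy(client);
+ *
+ * "idev", "heap_mask" and "heap_id_mask" are placeholders supplied by
+ * platform code; error handling (IS_ERR checks) is omitted for brevity.
+ */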
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @dmap_cnt:		count of times this client has mapped for dma
+ * @usermap_cnt:	count of times this client has mapped for userspace
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+	struct kref ref;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	unsigned int dmap_cnt;
+	unsigned int usermap_cnt;
+};
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+			   struct ion_buffer *buffer)
+{
+	struct rb_node **p = &dev->buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_buffer *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_buffer, node);
+
+		if (buffer < entry) {
+			p = &(*p)->rb_left;
+		} else if (buffer > entry) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: buffer already found.", __func__);
+			BUG();
+		}
+	}
+
+	rb_link_node(&buffer->node, parent, p);
+	rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+				     struct ion_device *dev,
+				     unsigned long len,
+				     unsigned long align,
+				     unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	buffer->heap = heap;
+	kref_init(&buffer->ref);
+
+	ret = heap->ops->allocate(heap, buffer, len, align, flags);
+	if (ret) {
+		kfree(buffer);
+		return ERR_PTR(ret);
+	}
+	buffer->dev = dev;
+	buffer->size = len;
+	mutex_init(&buffer->lock);
+	ion_buffer_add(dev, buffer);
+	return buffer;
+}
+
+static void ion_buffer_destroy(struct kref *kref)
+{
+	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+	struct ion_device *dev = buffer->dev;
+
+	buffer->heap->ops->free(buffer);
+	mutex_lock(&dev->lock);
+	rb_erase(&buffer->node, &dev->buffers);
+	mutex_unlock(&dev->lock);
+	kfree(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+	kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+	return kref_put(&buffer->ref, ion_buffer_destroy);
+}
+
+struct ion_handle *ion_handle_create(struct ion_client *client,
+				     struct ion_buffer *buffer)
+{
+	struct ion_handle *handle;
+
+	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&handle->ref);
+	handle->client = client;
+	ion_buffer_get(buffer);
+	handle->buffer = buffer;
+
+	return handle;
+}
+
+static void ion_handle_destroy(struct kref *kref)
+{
+	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+	/* XXX Can a handle be destroyed while its map count is non-zero?:
+	   if (handle->map_cnt) unmap
+	 */
+	ion_buffer_put(handle->buffer);
+	mutex_lock(&handle->client->lock);
+	rb_erase(&handle->node, &handle->client->handles);
+	mutex_unlock(&handle->client->lock);
+	kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+	return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+	kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+	return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct rb_node *n;
+
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		if (handle->buffer == buffer)
+			return handle;
+	}
+	return NULL;
+}
+
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+{
+	struct rb_node *n = client->handles.rb_node;
+
+	while (n) {
+		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
+							  node);
+		if (handle < handle_node)
+			n = n->rb_left;
+		else if (handle > handle_node)
+			n = n->rb_right;
+		else
+			return true;
+	}
+	return false;
+}
+
+static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+	struct rb_node **p = &client->handles.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_handle *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_handle, node);
+
+		if (handle < entry)
+			p = &(*p)->rb_left;
+		else if (handle > entry)
+			p = &(*p)->rb_right;
+		else
+			WARN(1, "%s: buffer already found.", __func__);
+	}
+
+	rb_link_node(&handle->node, parent, p);
+	rb_insert_color(&handle->node, &client->handles);
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int flags)
+{
+	struct rb_node *n;
+	struct ion_handle *handle;
+	struct ion_device *dev = client->dev;
+	struct ion_buffer *buffer = NULL;
+
+	/*
+	 * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client and matches the
+	 * caller's request, allocate from it.  Repeat until an allocation
+	 * succeeds or all heaps have been tried.
+	 */
+	mutex_lock(&dev->lock);
+	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
+		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+		/* if the client doesn't support this heap type */
+		if (!((1 << heap->type) & client->heap_mask))
+			continue;
+		/* if the caller didn't ask for this heap id */
+		if (!((1 << heap->id) & flags))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		if (!IS_ERR_OR_NULL(buffer))
+			break;
+	}
+	mutex_unlock(&dev->lock);
+
+	if (IS_ERR_OR_NULL(buffer))
+		return ERR_PTR(PTR_ERR(buffer));
+
+	handle = ion_handle_create(client, buffer);
+
+	if (IS_ERR_OR_NULL(handle))
+		goto end;
+
+	/*
+	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
+	 * and ion_handle_create will take a second reference; drop one here
+	 */
+	ion_buffer_put(buffer);
+
+	mutex_lock(&client->lock);
+	ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	return handle;
+
+end:
+	ion_buffer_put(buffer);
+	return handle;
+}
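+
+/*
+ * Clarifying note (not in the original patch): the flags argument above
+ * selects heaps by id, e.g. a caller that only wants the heap registered
+ * with id 2 would pass flags = (1 << 2), while flags = -1 permits any heap
+ * the client's heap_mask allows.
+ */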
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+
+	if (!valid_handle) {
+		WARN("%s: invalid handle passed to free.\n", __func__);
+		return;
+	}
+	ion_handle_put(handle);
+}
+
+static void ion_client_get(struct ion_client *client);
+static int ion_client_put(struct ion_client *client);
+
+bool _ion_map(int *buffer_cnt, int *handle_cnt)
+{
+	bool map;
+
+	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
+
+	if (*buffer_cnt)
+		map = false;
+	else
+		map = true;
+	if (*handle_cnt == 0)
+		(*buffer_cnt)++;
+	(*handle_cnt)++;
+	return map;
+}
+
+bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
+{
+	BUG_ON(*handle_cnt == 0);
+	(*handle_cnt)--;
+	if (*handle_cnt != 0)
+		return false;
+	BUG_ON(*buffer_cnt == 0);
+	(*buffer_cnt)--;
+	if (*buffer_cnt == 0)
+		return true;
+	return false;
+}
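+
+/*
+ * Clarifying note (not in the original patch): _ion_map()/_ion_unmap()
+ * maintain two nested counts.  handle_cnt tracks how many times this client
+ * has mapped the buffer; buffer_cnt tracks how many clients currently hold
+ * a mapping.  _ion_map() returns true only for the first mapping of the
+ * buffer (the heap mapping must be created) and _ion_unmap() returns true
+ * only when the last mapping goes away (the heap mapping can be torn down).
+ */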
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+
+	if (!buffer->heap->ops->phys) {
+		pr_err("%s: ion_phys is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&client->lock);
+	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+	return ret;
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	void *vaddr;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_kernel.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!handle->buffer->heap->ops->map_kernel) {
+		pr_err("%s: map_kernel is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&buffer->lock);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+		if (IS_ERR_OR_NULL(vaddr))
+			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
+		buffer->vaddr = vaddr;
+	} else {
+		vaddr = buffer->vaddr;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return vaddr;
+}
+
+struct scatterlist *ion_map_dma(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct scatterlist *sglist;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+
+	if (!handle->buffer->heap->ops->map_dma) {
+		pr_err("%s: map_kernel is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&buffer->lock);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
+		if (IS_ERR_OR_NULL(sglist))
+			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
+		buffer->sglist = sglist;
+	} else {
+		sglist = buffer->sglist;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return sglist;
+}
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
+		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+		buffer->sglist = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+
+
+struct ion_buffer *ion_share(struct ion_client *client,
+				 struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	mutex_unlock(&client->lock);
+	if (!valid_handle) {
+		WARN("%s: invalid handle passed to share.\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* do not take an extra reference here; the burden is on the caller
+	 * to make sure the buffer doesn't go away while it's passing it
+	 * to another client -- ion_free should not be called on this handle
+	 * until the buffer has been imported into the other client
+	 */
+	return handle->buffer;
+}
+
+struct ion_handle *ion_import(struct ion_client *client,
+			      struct ion_buffer *buffer)
+{
+	struct ion_handle *handle = NULL;
+
+	mutex_lock(&client->lock);
+	/* if a handle exists for this buffer just take a reference to it */
+	handle = ion_handle_lookup(client, buffer);
+	if (!IS_ERR_OR_NULL(handle)) {
+		ion_handle_get(handle);
+		goto end;
+	}
+	handle = ion_handle_create(client, buffer);
+	if (IS_ERR_OR_NULL(handle))
+		goto end;
+	ion_handle_add(client, handle);
+end:
+	mutex_unlock(&client->lock);
+	return handle;
+}
+
+static const struct file_operations ion_share_fops;
+
+struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
+{
+	struct file *file = fget(fd);
+	struct ion_handle *handle;
+
+	if (!file) {
+		pr_err("%s: imported fd not found in file table.\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	if (file->f_op != &ion_share_fops) {
+		pr_err("%s: imported file is not a shared ion file.\n",
+		       __func__);
+		handle = ERR_PTR(-EINVAL);
+		goto end;
+	}
+	handle = ion_import(client, file->private_data);
+end:
+	fput(file);
+	return handle;
+}
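+
+/*
+ * Sharing flow (illustrative note, not part of the original patch): process
+ * A obtains an fd for a handle via the ION_IOC_SHARE ioctl (which calls
+ * ion_ioctl_share() below) and passes that fd to process B, e.g. over a
+ * unix socket or binder; process B then calls the ION_IOC_IMPORT ioctl,
+ * which ends up here and resolves the fd back to a handle in B's client.
+ */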
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+	struct ion_client *client = s->private;
+	struct rb_node *n;
+	size_t sizes[ION_NUM_HEAPS] = {0};
+	const char *names[ION_NUM_HEAPS] = {0};
+	int i;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		enum ion_heap_type type = handle->buffer->heap->type;
+
+		if (!names[type])
+			names[type] = handle->buffer->heap->name;
+		sizes[type] += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+
+	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+	for (i = 0; i < ION_NUM_HEAPS; i++) {
+		if (!names[i])
+			continue;
+		seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i],
+			   atomic_read(&client->ref.refcount));
+	}
+	return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+	.open = ion_debug_client_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct ion_client *ion_client_lookup(struct ion_device *dev,
+					    struct task_struct *task)
+{
+	struct rb_node *n = dev->user_clients.rb_node;
+	struct ion_client *client;
+
+	mutex_lock(&dev->lock);
+	while (n) {
+		client = rb_entry(n, struct ion_client, node);
+		if (task == client->task) {
+			ion_client_get(client);
+			mutex_unlock(&dev->lock);
+			return client;
+		} else if (task < client->task) {
+			n = n->rb_left;
+		} else if (task > client->task) {
+			n = n->rb_right;
+		}
+	}
+	mutex_unlock(&dev->lock);
+	return NULL;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     unsigned int heap_mask,
+				     const char *name)
+{
+	struct ion_client *client;
+	struct task_struct *task;
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct ion_client *entry;
+	char debug_name[64];
+	pid_t pid;
+
+	get_task_struct(current->group_leader);
+	task_lock(current->group_leader);
+	pid = task_pid_nr(current->group_leader);
+	/* don't bother to store task struct for kernel threads,
+	   they can't be killed anyway */
+	if (current->group_leader->flags & PF_KTHREAD) {
+		put_task_struct(current->group_leader);
+		task = NULL;
+	} else {
+		task = current->group_leader;
+	}
+	task_unlock(current->group_leader);
+
+	/* if this isn't a kernel thread, see if a client already
+	   exists */
+	if (task) {
+		client = ion_client_lookup(dev, task);
+		if (!IS_ERR_OR_NULL(client)) {
+			put_task_struct(current->group_leader);
+			return client;
+		}
+	}
+
+	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+	if (!client) {
+		put_task_struct(current->group_leader);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	client->dev = dev;
+	client->handles = RB_ROOT;
+	mutex_init(&client->lock);
+	client->name = name;
+	client->heap_mask = heap_mask;
+	client->task = task;
+	client->pid = pid;
+	kref_init(&client->ref);
+
+	mutex_lock(&dev->lock);
+	if (task) {
+		p = &dev->user_clients.rb_node;
+		while (*p) {
+			parent = *p;
+			entry = rb_entry(parent, struct ion_client, node);
+
+			if (task < entry->task)
+				p = &(*p)->rb_left;
+			else if (task > entry->task)
+				p = &(*p)->rb_right;
+		}
+		rb_link_node(&client->node, parent, p);
+		rb_insert_color(&client->node, &dev->user_clients);
+	} else {
+		p = &dev->kernel_clients.rb_node;
+		while (*p) {
+			parent = *p;
+			entry = rb_entry(parent, struct ion_client, node);
+
+			if (client < entry)
+				p = &(*p)->rb_left;
+			else if (client > entry)
+				p = &(*p)->rb_right;
+		}
+		rb_link_node(&client->node, parent, p);
+		rb_insert_color(&client->node, &dev->kernel_clients);
+	}
+
+	snprintf(debug_name, 64, "%u", client->pid);
+	client->debug_root = debugfs_create_file(debug_name, 0664,
+						 dev->debug_root, client,
+						 &debug_client_fops);
+	mutex_unlock(&dev->lock);
+
+	return client;
+}
+
+static void _ion_client_destroy(struct kref *kref)
+{
+	struct ion_client *client = container_of(kref, struct ion_client, ref);
+	struct ion_device *dev = client->dev;
+	struct rb_node *n;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	while ((n = rb_first(&client->handles))) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		ion_handle_destroy(&handle->ref);
+	}
+	mutex_lock(&dev->lock);
+	if (client->task) {
+		rb_erase(&client->node, &dev->user_clients);
+		put_task_struct(client->task);
+	} else {
+		rb_erase(&client->node, &dev->kernel_clients);
+	}
+	debugfs_remove_recursive(client->debug_root);
+	mutex_unlock(&dev->lock);
+
+	kfree(client);
+}
+
+static void ion_client_get(struct ion_client *client)
+{
+	kref_get(&client->ref);
+}
+
+static int ion_client_put(struct ion_client *client)
+{
+	return kref_put(&client->ref, _ion_client_destroy);
+}
+
+void ion_client_destroy(struct ion_client *client)
+{
+	ion_client_put(client);
+}
+
+static int ion_share_release(struct inode *inode, struct file *file)
+{
+	struct ion_buffer *buffer = file->private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	/* drop the reference to the buffer -- this prevents the
+	   buffer from going away because the client holding it exited
+	   while it was being passed */
+	ion_buffer_put(buffer);
+	return 0;
+}
+
+static void ion_vma_open(struct vm_area_struct *vma)
+{
+
+	struct ion_buffer *buffer = vma->vm_file->private_data;
+	struct ion_handle *handle = vma->vm_private_data;
+	struct ion_client *client;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	/* check that the client still exists and take a reference so
+	   it can't go away until this vma is closed */
+	client = ion_client_lookup(buffer->dev, current->group_leader);
+	if (IS_ERR_OR_NULL(client)) {
+		vma->vm_private_data = NULL;
+		return;
+	}
+	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+		 __func__, __LINE__,
+		 atomic_read(&client->ref.refcount),
+		 atomic_read(&handle->ref.refcount),
+		 atomic_read(&buffer->ref.refcount));
+}
+
+static void ion_vma_close(struct vm_area_struct *vma)
+{
+	struct ion_handle *handle = vma->vm_private_data;
+	struct ion_buffer *buffer = vma->vm_file->private_data;
+	struct ion_client *client;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	/* this indicates the client is gone, nothing to do here */
+	if (!handle)
+		return;
+	client = handle->client;
+	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+		 __func__, __LINE__,
+		 atomic_read(&client->ref.refcount),
+		 atomic_read(&handle->ref.refcount),
+		 atomic_read(&buffer->ref.refcount));
+	ion_handle_put(handle);
+	ion_client_put(client);
+	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+		 __func__, __LINE__,
+		 atomic_read(&client->ref.refcount),
+		 atomic_read(&handle->ref.refcount),
+		 atomic_read(&buffer->ref.refcount));
+}
+
+static struct vm_operations_struct ion_vm_ops = {
+	.open = ion_vma_open,
+	.close = ion_vma_close,
+};
+
+static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = file->private_data;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	struct ion_client *client;
+	struct ion_handle *handle;
+	int ret;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	/* make sure the client still exists; it's possible for the client to
+	   have gone away but the map/share fd to still be around.  Take
+	   a reference to it so it can't go away while this mapping exists */
+	client = ion_client_lookup(buffer->dev, current->group_leader);
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("%s: trying to mmap an ion handle in a process with no "
+		       "ion client\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
+				     buffer->size)) {
+		pr_err("%s: trying to map larger area than handle has available"
+		       "\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* find the handle and take a reference to it */
+	handle = ion_import(client, buffer);
+	if (IS_ERR_OR_NULL(handle)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!handle->buffer->heap->ops->map_user) {
+		pr_err("%s: this heap does not define a method for mapping "
+		       "to userspace\n", __func__);
+		ret = -EINVAL;
+		goto err1;
+	}
+
+	mutex_lock(&buffer->lock);
+	/* now map it to userspace */
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	mutex_unlock(&buffer->lock);
+	if (ret) {
+		pr_err("%s: failure mapping buffer to userspace\n",
+		       __func__);
+		goto err1;
+	}
+
+	vma->vm_ops = &ion_vm_ops;
+	/* move the handle into the vm_private_data so we can access it from
+	   vma_open/close */
+	vma->vm_private_data = handle;
+	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
+		 __func__, __LINE__,
+		 atomic_read(&client->ref.refcount),
+		 atomic_read(&handle->ref.refcount),
+		 atomic_read(&buffer->ref.refcount));
+	return 0;
+
+err1:
+	/* drop the reference to the handle */
+	ion_handle_put(handle);
+err:
+	/* drop the reference to the client */
+	ion_client_put(client);
+	return ret;
+}
+
+static const struct file_operations ion_share_fops = {
+	.owner		= THIS_MODULE,
+	.release	= ion_share_release,
+	.mmap		= ion_share_mmap,
+};
+
+static int ion_ioctl_share(struct file *parent, struct ion_client *client,
+			   struct ion_handle *handle)
+{
+	int fd = get_unused_fd();
+	struct file *file;
+
+	if (fd < 0)
+		return -ENFILE;
+
+	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
+				  handle->buffer, O_RDWR);
+	if (IS_ERR_OR_NULL(file))
+		goto err;
+	ion_buffer_get(handle->buffer);
+	fd_install(fd, file);
+
+	return fd;
+
+err:
+	put_unused_fd(fd);
+	return -ENFILE;
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct ion_client *client = filp->private_data;
+
+	switch (cmd) {
+	case ION_IOC_ALLOC:
+	{
+		struct ion_allocation_data data;
+
+		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+			return -EFAULT;
+		data.handle = ion_alloc(client, data.len, data.align,
+					     data.flags);
+		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+			return -EFAULT;
+		break;
+	}
+	case ION_IOC_FREE:
+	{
+		struct ion_handle_data data;
+		bool valid;
+
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(struct ion_handle_data)))
+			return -EFAULT;
+		mutex_lock(&client->lock);
+		valid = ion_handle_validate(client, data.handle);
+		mutex_unlock(&client->lock);
+		if (!valid)
+			return -EINVAL;
+		ion_free(client, data.handle);
+		break;
+	}
+	case ION_IOC_MAP:
+	case ION_IOC_SHARE:
+	{
+		struct ion_fd_data data;
+
+		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+			return -EFAULT;
+		mutex_lock(&client->lock);
+		if (!ion_handle_validate(client, data.handle)) {
+			pr_err("%s: invalid handle passed to share ioctl.\n",
+			       __func__);
+			mutex_unlock(&client->lock);
+			return -EINVAL;
+		}
+		data.fd = ion_ioctl_share(filp, client, data.handle);
+		mutex_unlock(&client->lock);
+		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+			return -EFAULT;
+		break;
+	}
+	case ION_IOC_IMPORT:
+	{
+		struct ion_fd_data data;
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(struct ion_fd_data)))
+			return -EFAULT;
+
+		data.handle = ion_import_fd(client, data.fd);
+		if (IS_ERR(data.handle))
+			data.handle = NULL;
+		if (copy_to_user((void __user *)arg, &data,
+				 sizeof(struct ion_fd_data)))
+			return -EFAULT;
+		break;
+	}
+	case ION_IOC_CUSTOM:
+	{
+		struct ion_device *dev = client->dev;
+		struct ion_custom_data data;
+
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		if (copy_from_user(&data, (void __user *)arg,
+				sizeof(struct ion_custom_data)))
+			return -EFAULT;
+		return dev->custom_ioctl(client, data.cmd, data.arg);
+	}
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+	struct ion_client *client = file->private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	ion_client_put(client);
+	return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+	struct ion_client *client;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	client = ion_client_create(dev, -1, "user");
+	if (IS_ERR_OR_NULL(client))
+		return PTR_ERR(client);
+	file->private_data = client;
+
+	return 0;
+}
+
+static const struct file_operations ion_fops = {
+	.owner          = THIS_MODULE,
+	.open           = ion_open,
+	.release        = ion_release,
+	.unlocked_ioctl = ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+				   enum ion_heap_type type)
+{
+	size_t size = 0;
+	struct rb_node *n;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n,
+						     struct ion_handle,
+						     node);
+		if (handle->buffer->heap->type == type)
+			size += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+	struct ion_device *dev = heap->dev;
+	struct rb_node *n;
+
+	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+		char task_comm[TASK_COMM_LEN];
+		size_t size = ion_debug_heap_total(client, heap->type);
+		if (!size)
+			continue;
+
+		get_task_comm(task_comm, client->task);
+		seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid,
+			   size);
+	}
+
+	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+		size_t size = ion_debug_heap_total(client, heap->type);
+		if (!size)
+			continue;
+		seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid,
+			   size);
+	}
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+	struct rb_node **p = &dev->heaps.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_heap *entry;
+
+	heap->dev = dev;
+	mutex_lock(&dev->lock);
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_heap, node);
+
+		if (heap->id < entry->id) {
+			p = &(*p)->rb_left;
+		} else if (heap->id > entry->id) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: can not insert multiple heaps with "
+				"id %d\n", __func__, heap->id);
+			goto end;
+		}
+	}
+
+	rb_link_node(&heap->node, parent, p);
+	rb_insert_color(&heap->node, &dev->heaps);
+	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+			    &debug_heap_fops);
+end:
+	mutex_unlock(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg))
+{
+	struct ion_device *idev;
+	int ret;
+
+	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+	if (!idev)
+		return ERR_PTR(-ENOMEM);
+
+	idev->dev.minor = MISC_DYNAMIC_MINOR;
+	idev->dev.name = "ion";
+	idev->dev.fops = &ion_fops;
+	idev->dev.parent = NULL;
+	ret = misc_register(&idev->dev);
+	if (ret) {
+		pr_err("ion: failed to register misc device.\n");
+		return ERR_PTR(ret);
+	}
+
+	idev->debug_root = debugfs_create_dir("ion", NULL);
+	if (IS_ERR_OR_NULL(idev->debug_root))
+		pr_err("ion: failed to create debug files.\n");
+
+	idev->custom_ioctl = custom_ioctl;
+	idev->buffers = RB_ROOT;
+	mutex_init(&idev->lock);
+	idev->heaps = RB_ROOT;
+	idev->user_clients = RB_ROOT;
+	idev->kernel_clients = RB_ROOT;
+	return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+	misc_deregister(&dev->dev);
+	/* XXX need to free the heaps and clients ? */
+	kfree(dev);
+}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
new file mode 100644
index 0000000..606adae
--- /dev/null
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -0,0 +1,162 @@
+/*
+ * drivers/gpu/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+
+struct ion_carveout_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+				      unsigned long size,
+				      unsigned long align)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+	if (!offset)
+		return ION_CARVEOUT_ALLOCATE_FAIL;
+
+	return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+
+	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+		return;
+	gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+				  struct ion_buffer *buffer,
+				  ion_phys_addr_t *addr, size_t *len)
+{
+	*addr = buffer->priv_phys;
+	*len = buffer->size;
+	return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
+	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+
+	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
+	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
+}
+
+struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
+					      struct ion_buffer *buffer)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	return;
+}
+
+void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
+				   struct ion_buffer *buffer)
+{
+	return __arch_ioremap(buffer->priv_phys, buffer->size,
+			      MT_MEMORY_NONCACHED);
+}
+
+void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
+				    struct ion_buffer *buffer)
+{
+	__arch_iounmap(buffer->vaddr);
+	buffer->vaddr = NULL;
+	return;
+}
+
+int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+			       struct vm_area_struct *vma)
+{
+	return remap_pfn_range(vma, vma->vm_start,
+			       __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+			       buffer->size,
+			       pgprot_noncached(vma->vm_page_prot));
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+	.allocate = ion_carveout_heap_allocate,
+	.free = ion_carveout_heap_free,
+	.phys = ion_carveout_heap_phys,
+	.map_user = ion_carveout_heap_map_user,
+	.map_kernel = ion_carveout_heap_map_kernel,
+	.unmap_kernel = ion_carveout_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_carveout_heap *carveout_heap;
+
+	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+	if (!carveout_heap)
+		return ERR_PTR(-ENOMEM);
+
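+	/*
+	 * Clarifying note (not in the original patch): gen_pool_create()
+	 * takes the minimum allocation order in bits, so 12 gives a 4 KiB
+	 * allocation granule; -1 means the pool is not bound to a NUMA node.
+	 */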
+	carveout_heap->pool = gen_pool_create(12, -1);
+	if (!carveout_heap->pool) {
+		kfree(carveout_heap);
+		return ERR_PTR(-ENOMEM);
+	}
+	carveout_heap->base = heap_data->base;
+	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+		     -1);
+	carveout_heap->heap.ops = &carveout_heap_ops;
+	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+
+	return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_carveout_heap *carveout_heap =
+	     container_of(heap, struct  ion_carveout_heap, heap);
+
+	gen_pool_destroy(carveout_heap->pool);
+	kfree(carveout_heap);
+	carveout_heap = NULL;
+}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
new file mode 100644
index 0000000..8ce3c19
--- /dev/null
+++ b/drivers/gpu/ion/ion_heap.c
@@ -0,0 +1,72 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include "ion_priv.h"
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch (heap_data->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		heap = ion_system_contig_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		heap = ion_system_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		heap = ion_carveout_heap_create(heap_data);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap_data->type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %lu size %u\n",
+		       __func__, heap_data->name, heap_data->type,
+		       heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		ion_system_contig_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		ion_system_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		ion_carveout_heap_destroy(heap);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap->type);
+	}
+}
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
new file mode 100644
index 0000000..3323954
--- /dev/null
+++ b/drivers/gpu/ion/ion_priv.h
@@ -0,0 +1,184 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/ion.h>
+
+struct ion_mapping;
+
+struct ion_dma_mapping {
+	struct kref ref;
+	struct scatterlist *sglist;
+};
+
+struct ion_kernel_mapping {
+	struct kref ref;
+	void *vaddr;
+};
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node:		node in the ion_device buffers tree
+ * @dev:		back pointer to the ion_device
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @priv_phys:		private data to the buffer representable as
+ *			an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:		protects the buffers cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:		number of times the buffer is mapped for dma
+ * @sglist:		the scatterlist for the buffer if dmap_cnt is not zero
+*/
+struct ion_buffer {
+	struct kref ref;
+	struct rb_node node;
+	struct ion_device *dev;
+	struct ion_heap *heap;
+	unsigned long flags;
+	size_t size;
+	union {
+		void *priv_virt;
+		ion_phys_addr_t priv_phys;
+	};
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	int dmap_cnt;
+	struct scatterlist *sglist;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @phys		get physical address of a buffer (only defined on
+ *			physically contiguous heaps)
+ * @map_dma		map the memory for dma to a scatterlist
+ * @unmap_dma		unmap the memory for dma
+ * @map_kernel		map memory to the kernel
+ * @unmap_kernel	unmap memory from the kernel
+ * @map_user		map memory to userspace
+ */
+struct ion_heap_ops {
+	int (*allocate) (struct ion_heap *heap,
+			 struct ion_buffer *buffer, unsigned long len,
+			 unsigned long align, unsigned long flags);
+	void (*free) (struct ion_buffer *buffer);
+	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+		     ion_phys_addr_t *addr, size_t *len);
+	struct scatterlist *(*map_dma) (struct ion_heap *heap,
+					struct ion_buffer *buffer);
+	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+			 struct vm_area_struct *vma);
+};
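+
+/*
+ * Illustrative sketch (hedged, not part of the original patch): a heap
+ * backend only has to provide allocate/free plus whichever map operations
+ * its memory supports, as ion_carveout_heap.c and ion_system_heap.c in this
+ * series do, e.g.:
+ *
+ *	static struct ion_heap_ops my_heap_ops = {
+ *		.allocate = my_heap_allocate,
+ *		.free = my_heap_free,
+ *		.map_kernel = my_heap_map_kernel,
+ *		.unmap_kernel = my_heap_unmap_kernel,
+ *		.map_user = my_heap_map_user,
+ *	};
+ *
+ * The my_heap_* callbacks are hypothetical and named here only for
+ * illustration.
+ */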
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		rb node to put the heap on the device's tree of heaps
+ * @dev:		back pointer to the ion_device
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating.  These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+	struct rb_node node;
+	struct ion_device *dev;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	int id;
+	const char *name;
+};
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:	arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg));
+
+/**
+ * ion_device_destroy - free the device and its resources
+ * @dev:		the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:		the device
+ * @heap:		the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+				      unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size);
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this value is used to indicate that allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
new file mode 100644
index 0000000..c046cf1
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -0,0 +1,198 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+				     struct ion_buffer *buffer,
+				     unsigned long size, unsigned long align,
+				     unsigned long flags)
+{
+	buffer->priv_virt = vmalloc_user(size);
+	if (!buffer->priv_virt)
+		return -ENOMEM;
+	return 0;
+}
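+
+/*
+ * Clarifying note (not in the original patch): vmalloc_user() returns
+ * zeroed, page-aligned memory suitable for mapping to userspace, which is
+ * why ion_system_heap_map_user() below can hand it to
+ * remap_vmalloc_range() directly.
+ */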
+
+void ion_system_heap_free(struct ion_buffer *buffer)
+{
+	vfree(buffer->priv_virt);
+}
+
+struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
+					    struct ion_buffer *buffer)
+{
+	struct scatterlist *sglist;
+	struct page *page;
+	int i;
+	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	void *vaddr = buffer->priv_virt;
+
+	sglist = vmalloc(npages * sizeof(struct scatterlist));
+	if (!sglist)
+		return ERR_PTR(-ENOMEM);
+	memset(sglist, 0, npages * sizeof(struct scatterlist));
+	sg_init_table(sglist, npages);
+	for (i = 0; i < npages; i++) {
+		page = vmalloc_to_page(vaddr);
+		if (!page)
+			goto end;
+		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
+		vaddr += PAGE_SIZE;
+	}
+	/* XXX do cache maintenance for dma? */
+	return sglist;
+end:
+	vfree(sglist);
+	return NULL;
+}
+
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+	/* XXX undo cache maintenance for dma? */
+	if (buffer->sglist)
+		vfree(buffer->sglist);
+}
+
+void *ion_system_heap_map_kernel(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+void ion_system_heap_unmap_kernel(struct ion_heap *heap,
+				  struct ion_buffer *buffer)
+{
+}
+
+int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+			     struct vm_area_struct *vma)
+{
+	return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
+}
+
+static struct ion_heap_ops vmalloc_ops = {
+	.allocate = ion_system_heap_allocate,
+	.free = ion_system_heap_free,
+	.map_dma = ion_system_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_system_heap_map_kernel,
+	.unmap_kernel = ion_system_heap_unmap_kernel,
+	.map_user = ion_system_heap_map_user,
+};
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->ops = &vmalloc_ops;
+	heap->type = ION_HEAP_TYPE_SYSTEM;
+	return heap;
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+					   struct ion_buffer *buffer,
+					   unsigned long len,
+					   unsigned long align,
+					   unsigned long flags)
+{
+	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
+	if (!buffer->priv_virt)
+		return -ENOMEM;
+	return 0;
+}
+
+void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+	kfree(buffer->priv_virt);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+				       struct ion_buffer *buffer,
+				       ion_phys_addr_t *addr, size_t *len)
+{
+	*addr = virt_to_phys(buffer->priv_virt);
+	*len = buffer->size;
+	return 0;
+}
+
+struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+						   struct ion_buffer *buffer)
+{
+	struct scatterlist *sglist;
+
+	sglist = vmalloc(sizeof(struct scatterlist));
+	if (!sglist)
+		return ERR_PTR(-ENOMEM);
+	sg_init_table(sglist, 1);
+	sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
+	return sglist;
+}
+
+int ion_system_contig_heap_map_user(struct ion_heap *heap,
+				    struct ion_buffer *buffer,
+				    struct vm_area_struct *vma)
+{
+	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
+	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+	.allocate = ion_system_contig_heap_allocate,
+	.free = ion_system_contig_heap_free,
+	.phys = ion_system_contig_heap_phys,
+	.map_dma = ion_system_contig_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_system_heap_map_kernel,
+	.unmap_kernel = ion_system_heap_unmap_kernel,
+	.map_user = ion_system_contig_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->ops = &kmalloc_ops;
+	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+	return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
+
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
new file mode 100644
index 0000000..692458e
--- /dev/null
+++ b/drivers/gpu/ion/ion_system_mapper.c
@@ -0,0 +1,114 @@
+/*
+ * drivers/gpu/ion/ion_system_mapper.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion_priv.h"
+/*
+ * This mapper is valid for any heap that allocates memory that already has
+ * a kernel mapping; this includes vmalloc'd memory, kmalloc'd memory,
+ * pages obtained via ioremap, etc.
+ */
+static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
+				   struct ion_buffer *buffer,
+				   struct ion_mapping **mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+		pr_err("%s: attempting to map an unsupported heap\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	/* XXX REVISIT ME!!! */
+	*((unsigned long *)mapping) = (unsigned long)buffer->priv;
+	return buffer->priv;
+}
+
+static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
+				    struct ion_buffer *buffer,
+				    struct ion_mapping *mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask))
+		pr_err("%s: attempting to unmap an unsupported heap\n",
+		       __func__);
+}
+
+static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
+					struct ion_buffer *buffer,
+					struct ion_mapping *mapping)
+{
+	if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
+		pr_err("%s: attempting to unmap an unsupported heap\n",
+		       __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	return buffer->priv;
+}
+
+static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
+				      struct ion_buffer *buffer,
+				      struct vm_area_struct *vma,
+				      struct ion_mapping *mapping)
+{
+	int ret;
+
+	switch (buffer->heap->type) {
+	case ION_HEAP_KMALLOC:
+	{
+		unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
+		ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+		break;
+	}
+	case ION_HEAP_VMALLOC:
+		ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
+		break;
+	default:
+		pr_err("%s: attempting to map unsupported heap to userspace\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct ion_mapper_ops ops = {
+	.map = ion_kernel_mapper_map,
+	.map_kernel = ion_kernel_mapper_map_kernel,
+	.map_user = ion_kernel_mapper_map_user,
+	.unmap = ion_kernel_mapper_unmap,
+};
+
+struct ion_mapper *ion_system_mapper_create(void)
+{
+	struct ion_mapper *mapper;
+	mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
+	if (!mapper)
+		return ERR_PTR(-ENOMEM);
+	mapper->type = ION_SYSTEM_MAPPER;
+	mapper->ops = &ops;
+	mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
+	return mapper;
+}
+
+void ion_system_mapper_destroy(struct ion_mapper *mapper)
+{
+	kfree(mapper);
+}
+
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
new file mode 100644
index 0000000..11cd003
--- /dev/null
+++ b/drivers/gpu/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
new file mode 100644
index 0000000..7af6e16
--- /dev/null
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -0,0 +1,96 @@
+/*
+ * drivers/gpu/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion_priv.h"
+
+struct ion_device *idev;
+struct ion_mapper *tegra_user_mapper;
+int num_heaps;
+struct ion_heap **heaps;
+
+int tegra_ion_probe(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = pdev->dev.platform_data;
+	int err;
+	int i;
+
+	num_heaps = pdata->nr;
+
+	heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+	idev = ion_device_create(NULL);
+	if (IS_ERR_OR_NULL(idev)) {
+		kfree(heaps);
+		return PTR_ERR(idev);
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		heaps[i] = ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			err = PTR_ERR(heaps[i]);
+			goto err;
+		}
+		ion_device_add_heap(idev, heaps[i]);
+	}
+	platform_set_drvdata(pdev, idev);
+	return 0;
+err:
+	for (i = 0; i < num_heaps; i++) {
+		if (heaps[i])
+			ion_heap_destroy(heaps[i]);
+	}
+	kfree(heaps);
+	return err;
+}
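+
+/*
+ * Board wiring (illustrative sketch, not part of the original patch): a
+ * Tegra board file is expected to register a platform_device named
+ * "ion-tegra" whose platform_data is a struct ion_platform_data describing
+ * the heaps, roughly:
+ *
+ *	static struct ion_platform_heap my_ion_heaps[] = {
+ *		{ .type = ION_HEAP_TYPE_SYSTEM, .id = 0, .name = "system" },
+ *		{ .type = ION_HEAP_TYPE_CARVEOUT, .id = 1, .name = "carveout",
+ *		  .base = 0x80000000, .size = SZ_32M },
+ *	};
+ *	static struct ion_platform_data my_ion_pdata = {
+ *		.nr = ARRAY_SIZE(my_ion_heaps),
+ *		.heaps = my_ion_heaps,
+ *	};
+ *
+ * The field names (.nr, .heaps, .type, .id, .name, .base, .size) are the
+ * ones this probe routine and ion_heap_create() consume; the base address
+ * and size values are made up for illustration.
+ */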
+
+int tegra_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	ion_device_destroy(idev);
+	for (i = 0; i < num_heaps; i++)
+		ion_heap_destroy(heaps[i]);
+	kfree(heaps);
+	return 0;
+}
+
+static struct platform_driver ion_driver = {
+	.probe = tegra_ion_probe,
+	.remove = tegra_ion_remove,
+	.driver = { .name = "ion-tegra" }
+};
+
+static int __init ion_init(void)
+{
+	return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+	platform_driver_unregister(&ion_driver);
+}
+
+module_init(ion_init);
+module_exit(ion_exit);
+
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 23e82e4..c0e639c 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -161,6 +161,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called apm-power.
 
+config INPUT_KEYRESET
+	tristate "Reset key"
+	depends on INPUT
+	---help---
+	  Say Y here if you want to reboot when certain keys are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keyreset.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 0c78949..5d4593d 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -24,3 +24,4 @@
 obj-$(CONFIG_INPUT_MISC)	+= misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET)	+= keyreset.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4cf2534..07b6c81 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -23,6 +23,7 @@
 #include <linux/input.h>
 #include <linux/major.h>
 #include <linux/device.h>
+#include <linux/wakelock.h>
 #include "input-compat.h"
 
 struct evdev {
@@ -43,6 +44,8 @@
 	unsigned int tail;
 	unsigned int packet_head; /* [future] position of the first element of next packet */
 	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+	struct wake_lock wake_lock;
+	char name[28];
 	struct fasync_struct *fasync;
 	struct evdev *evdev;
 	struct list_head node;
@@ -59,6 +62,7 @@
 	/* Interrupts are disabled, just acquire the lock. */
 	spin_lock(&client->buffer_lock);
 
+	wake_lock_timeout(&client->wake_lock, 5 * HZ);
 	client->buffer[client->head++] = *event;
 	client->head &= client->bufsize - 1;
 
@@ -94,8 +98,11 @@
 	struct evdev *evdev = handle->private;
 	struct evdev_client *client;
 	struct input_event event;
+	struct timespec ts;
 
-	do_gettimeofday(&event.time);
+	ktime_get_ts(&ts);
+	event.time.tv_sec = ts.tv_sec;
+	event.time.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
 	event.type = type;
 	event.code = code;
 	event.value = value;
@@ -255,6 +262,7 @@
 	mutex_unlock(&evdev->mutex);
 
 	evdev_detach_client(evdev, client);
+	wake_lock_destroy(&client->wake_lock);
 	kfree(client);
 
 	evdev_close_device(evdev);
@@ -306,6 +314,9 @@
 
 	client->bufsize = bufsize;
 	spin_lock_init(&client->buffer_lock);
+	snprintf(client->name, sizeof(client->name), "%s-%d",
+			dev_name(&evdev->dev), task_tgid_vnr(current));
+	wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
 	client->evdev = evdev;
 	evdev_attach_client(evdev, client);
 
@@ -320,6 +331,7 @@
 
  err_free_client:
 	evdev_detach_client(evdev, client);
+	wake_lock_destroy(&client->wake_lock);
 	kfree(client);
  err_put_evdev:
 	put_device(&evdev->dev);
@@ -373,6 +385,8 @@
 	if (have_event) {
 		*event = client->buffer[client->tail++];
 		client->tail &= client->bufsize - 1;
+		if (client->head == client->tail)
+			wake_unlock(&client->wake_lock);
 	}
 
 	spin_unlock_irq(&client->buffer_lock);
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 0000000..36208fe
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,239 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+
+struct keyreset_state {
+	struct input_handler input_handler;
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+	spinlock_t lock;
+	int key_down_target;
+	int key_down;
+	int key_up;
+	int restart_disabled;
+	int (*reset_fn)(void);
+};
+
+int restart_requested;
+static void deferred_restart(struct work_struct *dummy)
+{
+	restart_requested = 2;
+	sys_sync();
+	restart_requested = 3;
+	kernel_restart(NULL);
+}
+static DECLARE_WORK(restart_work, deferred_restart);
+
+static void keyreset_event(struct input_handle *handle, unsigned int type,
+			   unsigned int code, int value)
+{
+	unsigned long flags;
+	struct keyreset_state *state = handle->private;
+
+	if (type != EV_KEY)
+		return;
+
+	if (code >= KEY_MAX)
+		return;
+
+	if (!test_bit(code, state->keybit))
+		return;
+
+	spin_lock_irqsave(&state->lock, flags);
+	if (!test_bit(code, state->key) == !value)
+		goto done;
+	__change_bit(code, state->key);
+	if (test_bit(code, state->upbit)) {
+		if (value) {
+			state->restart_disabled = 1;
+			state->key_up++;
+		} else
+			state->key_up--;
+	} else {
+		if (value)
+			state->key_down++;
+		else
+			state->key_down--;
+	}
+	if (state->key_down == 0 && state->key_up == 0)
+		state->restart_disabled = 0;
+
+	pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value,
+		 state->key_down, state->key_up, state->restart_disabled);
+
+	if (value && !state->restart_disabled &&
+	    state->key_down == state->key_down_target) {
+		state->restart_disabled = 1;
+		if (restart_requested)
+			panic("keyboard reset failed, %d", restart_requested);
+		if (state->reset_fn) {
+			restart_requested = state->reset_fn();
+		} else {
+			pr_info("keyboard reset\n");
+			schedule_work(&restart_work);
+			restart_requested = 1;
+		}
+	}
+done:
+	spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keyreset_connect(struct input_handler *handler,
+					  struct input_dev *dev,
+					  const struct input_device_id *id)
+{
+	int i;
+	int ret;
+	struct input_handle *handle;
+	struct keyreset_state *state =
+		container_of(handler, struct keyreset_state, input_handler);
+
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "keyreset";
+	handle->private = state;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	pr_info("using input dev %s for key reset\n", dev->name);
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keyreset_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id keyreset_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(input, keyreset_ids);
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+	int ret;
+	int key, *keyp;
+	struct keyreset_state *state;
+	struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_init(&state->lock);
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		state->key_down_target++;
+		__set_bit(key, state->keybit);
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			__set_bit(key, state->keybit);
+			__set_bit(key, state->upbit);
+		}
+	}
+
+	if (pdata->reset_fn)
+		state->reset_fn = pdata->reset_fn;
+
+	state->input_handler.event = keyreset_event;
+	state->input_handler.connect = keyreset_connect;
+	state->input_handler.disconnect = keyreset_disconnect;
+	state->input_handler.name = KEYRESET_NAME;
+	state->input_handler.id_table = keyreset_ids;
+	ret = input_register_handler(&state->input_handler);
+	if (ret) {
+		kfree(state);
+		return ret;
+	}
+	platform_set_drvdata(pdev, state);
+	return 0;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+	struct keyreset_state *state = platform_get_drvdata(pdev);
+	input_unregister_handler(&state->input_handler);
+	kfree(state);
+	return 0;
+}
+
+
+struct platform_driver keyreset_driver = {
+	.driver.name = KEYRESET_NAME,
+	.probe = keyreset_probe,
+	.remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+	return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+	platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
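
keyreset_probe() above consumes a keyreset_platform_data with a
zero-terminated keys_down list, an optional keys_up list and an optional
reset_fn callback.  A minimal board-file sketch, assuming the
<linux/keyreset.h> layout implied by those accesses (KEYRESET_NAME also
comes from that header):

#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/platform_device.h>

/* reboot when volume-down and power are held at the same time */
static int board_reset_keys_down[] = {
	KEY_VOLUMEDOWN,
	KEY_POWER,
	0,	/* zero terminator, as keyreset_probe() expects */
};

static struct keyreset_platform_data board_keyreset_data = {
	.keys_down = board_reset_keys_down,
	/* .keys_up and .reset_fn are optional and left unset here */
};

static struct platform_device board_keyreset_device = {
	.name = KEYRESET_NAME,
	.id   = -1,
	.dev  = { .platform_data = &board_keyreset_data },
};
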
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 45dc6aa..6f4ad1a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -193,6 +193,17 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called ati_remote2.
 
+config INPUT_KEYCHORD
+	tristate "Key chord input driver support"
+	help
+	  Say Y here if you want to enable the key chord driver
+	  accessible at /dev/keychord.  This driver can be used
+	  for receiving notifications when client-specified key
+	  combinations are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keychord.
+
 config INPUT_KEYSPAN_REMOTE
 	tristate "Keyspan DMR USB remote control (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -294,6 +305,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called sgi_btns.
 
+config INPUT_GPIO
+	tristate "GPIO driver support"
+	help
+	  Say Y here if you want to support GPIO-based keys, wheels, etc.
+
 config HP_SDC_RTC
 	tristate "HP SDC Real Time Clock"
 	depends on (GSC || HP300) && SERIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 38efb2c..eb73834 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -22,8 +22,10 @@
 obj-$(CONFIG_INPUT_CMA3000_I2C)		+= cma3000_d0x_i2c.o
 obj-$(CONFIG_INPUT_COBALT_BTNS)		+= cobalt_btns.o
 obj-$(CONFIG_INPUT_DM355EVM)		+= dm355evm_keys.o
+obj-$(CONFIG_INPUT_GPIO)		+= gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
 obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD)		+= keychord.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
 obj-$(CONFIG_INPUT_MAX8925_ONKEY)	+= max8925_onkey.o
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644
index 0000000..0acf4a5
--- /dev/null
+++ b/drivers/input/misc/gpio_axis.c
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_axis_info *info;
+	uint32_t pos;
+};
+
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+	[0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+	[0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+	[0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+	[0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+	[0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+	[0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+	[0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+	[0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+	[0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /*     10000 10100 11100 */
+	[0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /*     11110 11010 11000 */
+	[0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /*    01000 01010 01110  */
+	[0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /*    01111 01101 01100  */
+	[0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /*   00100 00101 00111   */
+	[0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /*   10111 10110 00110   */
+	[0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /*  00010 10010 10011    */
+	[0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /*  11011 01011 00011    */
+	[0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001     */
+	[0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001     */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+	struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+	struct gpio_event_axis_info *ai = as->info;
+	int i;
+	int change;
+	uint16_t state = 0;
+	uint16_t pos;
+	uint16_t old_pos = as->pos;
+	for (i = ai->count - 1; i >= 0; i--)
+		state = (state << 1) | gpio_get_value(ai->gpio[i]);
+	pos = ai->map(ai, state);
+	if (ai->flags & GPIOEAF_PRINT_RAW)
+		pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+			ai->type, ai->code, state, old_pos, pos);
+	if (report && pos != old_pos) {
+		if (ai->type == EV_REL) {
+			change = (ai->decoded_size + pos - old_pos) %
+				  ai->decoded_size;
+			if (change > ai->decoded_size / 2)
+				change -= ai->decoded_size;
+			if (change == ai->decoded_size / 2) {
+				if (ai->flags & GPIOEAF_PRINT_EVENT)
+					pr_info("axis %d-%d unknown direction, "
+						"pos %d -> %d\n", ai->type,
+						ai->code, old_pos, pos);
+				change = 0; /* no closest direction */
+			}
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d change %d\n",
+					ai->type, ai->code, change);
+			input_report_rel(as->input_devs->dev[ai->dev],
+						ai->code, change);
+		} else {
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d now %d\n",
+					ai->type, ai->code, pos);
+			input_event(as->input_devs->dev[ai->dev],
+					ai->type, ai->code, pos);
+		}
+		input_sync(as->input_devs->dev[ai->dev]);
+	}
+	as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_axis_state *as = dev_id;
+	gpio_event_update_axis(as, 1);
+	return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			 struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	int irq;
+	struct gpio_event_axis_info *ai;
+	struct gpio_axis_state *as;
+
+	ai = container_of(info, struct gpio_event_axis_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		for (i = 0; i < ai->count; i++)
+			disable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		for (i = 0; i < ai->count; i++)
+			enable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		*data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+		if (as == NULL) {
+			ret = -ENOMEM;
+			goto err_alloc_axis_state_failed;
+		}
+		as->input_devs = input_devs;
+		as->info = ai;
+		if (ai->dev >= input_devs->count) {
+			pr_err("gpio_event_axis: bad device index %d >= %d "
+				"for %d:%d\n", ai->dev, input_devs->count,
+				ai->type, ai->code);
+			ret = -EINVAL;
+			goto err_bad_device_index;
+		}
+
+		input_set_capability(input_devs->dev[ai->dev],
+				     ai->type, ai->code);
+		if (ai->type == EV_ABS) {
+			input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+					     0, ai->decoded_size - 1, 0, 0);
+		}
+		for (i = 0; i < ai->count; i++) {
+			ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+			if (ret < 0)
+				goto err_request_gpio_failed;
+			ret = gpio_direction_input(ai->gpio[i]);
+			if (ret < 0)
+				goto err_gpio_direction_input_failed;
+			ret = irq = gpio_to_irq(ai->gpio[i]);
+			if (ret < 0)
+				goto err_get_irq_num_failed;
+			ret = request_irq(irq, gpio_axis_irq_handler,
+					  IRQF_TRIGGER_RISING |
+					  IRQF_TRIGGER_FALLING,
+					  "gpio_event_axis", as);
+			if (ret < 0)
+				goto err_request_irq_failed;
+		}
+		gpio_event_update_axis(as, 0);
+		return 0;
+	}
+
+	ret = 0;
+	as = *data;
+	for (i = ai->count - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+		gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+		;
+	}
+err_bad_device_index:
+	kfree(as);
+	*data = NULL;
+err_alloc_axis_state_failed:
+	return ret;
+}
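
gpio_event_axis_func() reads its configuration from a gpio_event_axis_info:
a gpio[] array of count pins, a map() decoder, the event type/code, the
target input device index and the decoded range.  A hypothetical board-file
entry for a 4-bit Gray-code scroll wheel using the map exported above; the
exact member types live in <linux/gpio_event.h> and are assumed here:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>

static uint32_t board_wheel_gpios[] = { 40, 41, 42, 43 };	/* board-specific */

static struct gpio_event_axis_info board_wheel_axis_info = {
	.info.func    = gpio_event_axis_func,
	.dev          = 0,		/* index into the gpio_event input devices */
	.type         = EV_REL,
	.code         = REL_WHEEL,
	.count        = ARRAY_SIZE(board_wheel_gpios),
	.decoded_size = 16,		/* 4 Gray-coded bits -> 16 positions */
	.map          = gpio_axis_4bit_gray_map,
	.gpio         = board_wheel_gpios,
	/* .flags: set GPIOEAF_PRINT_RAW / GPIOEAF_PRINT_EVENT for debugging */
};
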
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644
index 0000000..a98be67
--- /dev/null
+++ b/drivers/input/misc/gpio_event.c
@@ -0,0 +1,260 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_platform_data *info;
+	struct early_suspend early_suspend;
+	void *state[0];
+};
+
+static int gpio_input_event(
+	struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+	int i;
+	int devnr;
+	int ret = 0;
+	int tmp_ret;
+	struct gpio_event_info **ii;
+	struct gpio_event *ip = input_get_drvdata(dev);
+
+	for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+		if (ip->input_devs->dev[devnr] == dev)
+			break;
+	if (devnr == ip->input_devs->count) {
+		pr_err("gpio_input_event: unknown device %p\n", dev);
+		return -EIO;
+	}
+
+	for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+		if ((*ii)->event) {
+			tmp_ret = (*ii)->event(ip->input_devs, *ii,
+						&ip->state[i],
+						devnr, type, code, value);
+			if (tmp_ret)
+				ret = tmp_ret;
+		}
+	}
+	return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+	int i;
+	int ret;
+	struct gpio_event_info **ii;
+
+	if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+		ii = ip->info->info;
+		for (i = 0; i < ip->info->info_count; i++, ii++) {
+			if ((*ii)->func == NULL) {
+				ret = -ENODEV;
+				pr_err("gpio_event_probe: Incomplete pdata, "
+					"no function\n");
+				goto err_no_func;
+			}
+			if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+				continue;
+			ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+					  func);
+			if (ret) {
+				pr_err("gpio_event_probe: function failed\n");
+				goto err_func_failed;
+			}
+		}
+		return 0;
+	}
+
+	ret = 0;
+	i = ip->info->info_count;
+	ii = ip->info->info + i;
+	while (i > 0) {
+		i--;
+		ii--;
+		if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+			continue;
+		(*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+		;
+	}
+	return ret;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void gpio_event_suspend(struct early_suspend *h)
+{
+	struct gpio_event *ip;
+	ip = container_of(h, struct gpio_event, early_suspend);
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+	ip->info->power(ip->info, 0);
+}
+
+void gpio_event_resume(struct early_suspend *h)
+{
+	struct gpio_event *ip;
+	ip = container_of(h, struct gpio_event, early_suspend);
+	ip->info->power(ip->info, 1);
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+#endif
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+	int err;
+	struct gpio_event *ip;
+	struct gpio_event_platform_data *event_info;
+	int dev_count = 1;
+	int i;
+	int registered = 0;
+
+	event_info = pdev->dev.platform_data;
+	if (event_info == NULL) {
+		pr_err("gpio_event_probe: No pdata\n");
+		return -ENODEV;
+	}
+	if ((!event_info->name && !event_info->names[0]) ||
+	    !event_info->info || !event_info->info_count) {
+		pr_err("gpio_event_probe: Incomplete pdata\n");
+		return -ENODEV;
+	}
+	if (!event_info->name)
+		while (event_info->names[dev_count])
+			dev_count++;
+	ip = kzalloc(sizeof(*ip) +
+		     sizeof(ip->state[0]) * event_info->info_count +
+		     sizeof(*ip->input_devs) +
+		     sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+	if (ip == NULL) {
+		err = -ENOMEM;
+		pr_err("gpio_event_probe: Failed to allocate private data\n");
+		goto err_kp_alloc_failed;
+	}
+	ip->input_devs = (void*)&ip->state[event_info->info_count];
+	platform_set_drvdata(pdev, ip);
+
+	for (i = 0; i < dev_count; i++) {
+		struct input_dev *input_dev = input_allocate_device();
+		if (input_dev == NULL) {
+			err = -ENOMEM;
+			pr_err("gpio_event_probe: "
+				"Failed to allocate input device\n");
+			goto err_input_dev_alloc_failed;
+		}
+		input_set_drvdata(input_dev, ip);
+		input_dev->name = event_info->name ?
+					event_info->name : event_info->names[i];
+		input_dev->event = gpio_input_event;
+		ip->input_devs->dev[i] = input_dev;
+	}
+	ip->input_devs->count = dev_count;
+	ip->info = event_info;
+	if (event_info->power) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		ip->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+		ip->early_suspend.suspend = gpio_event_suspend;
+		ip->early_suspend.resume = gpio_event_resume;
+		register_early_suspend(&ip->early_suspend);
+#endif
+		ip->info->power(ip->info, 1);
+	}
+
+	err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+	if (err)
+		goto err_call_all_func_failed;
+
+	for (i = 0; i < dev_count; i++) {
+		err = input_register_device(ip->input_devs->dev[i]);
+		if (err) {
+			pr_err("gpio_event_probe: Unable to register %s "
+				"input device\n", ip->input_devs->dev[i]->name);
+			goto err_input_register_device_failed;
+		}
+		registered++;
+	}
+
+	return 0;
+
+err_input_register_device_failed:
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+	if (event_info->power) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		unregister_early_suspend(&ip->early_suspend);
+#endif
+		ip->info->power(ip->info, 0);
+	}
+	for (i = 0; i < registered; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	for (i = dev_count - 1; i >= registered; i--) {
+		input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+		;
+	}
+	kfree(ip);
+err_kp_alloc_failed:
+	return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+	struct gpio_event *ip = platform_get_drvdata(pdev);
+	int i;
+
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+	if (ip->info->power) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		unregister_early_suspend(&ip->early_suspend);
+#endif
+		ip->info->power(ip->info, 0);
+	}
+	for (i = 0; i < ip->input_devs->count; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	kfree(ip);
+	return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+	.probe		= gpio_event_probe,
+	.remove		= gpio_event_remove,
+	.driver		= {
+		.name	= GPIO_EVENT_DEV_NAME,
+	},
+};
+
+static int __devinit gpio_event_init(void)
+{
+	return platform_driver_register(&gpio_event_driver);
+}
+
+static void __exit gpio_event_exit(void)
+{
+	platform_driver_unregister(&gpio_event_driver);
+}
+
+module_init(gpio_event_init);
+module_exit(gpio_event_exit);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
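
gpio_event_probe() pulls everything together from a gpio_event_platform_data:
a device name (or NULL-terminated names[] list), an info[] array of
gpio_event_info pointers with info_count entries, and an optional power()
callback that also enables the early-suspend hooks above.  A hypothetical
board-file sketch combining the axis entry sketched earlier with the
direct-key and matrix entries sketched after gpio_input.c and gpio_matrix.c
below; GPIO_EVENT_DEV_NAME comes from <linux/gpio_event.h>:

#include <linux/gpio_event.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct gpio_event_info *board_gpio_event_info[] = {
	&board_wheel_axis_info.info,
	&board_keypad_switch_info.info,
	&board_kp_matrix_info.info,
};

static struct gpio_event_platform_data board_gpio_event_data = {
	.name       = "board-keypad",	/* one input device for all entries */
	.info       = board_gpio_event_info,
	.info_count = ARRAY_SIZE(board_gpio_event_info),
	/* .power left unset: no early-suspend handling */
};

static struct platform_device board_gpio_event_device = {
	.name = GPIO_EVENT_DEV_NAME,
	.id   = 0,
	.dev  = { .platform_data = &board_gpio_event_data },
};
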
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644
index 0000000..98d204f
--- /dev/null
+++ b/drivers/input/misc/gpio_input.c
@@ -0,0 +1,361 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+enum {
+	DEBOUNCE_UNSTABLE     = BIT(0),	/* Got irq, while debouncing */
+	DEBOUNCE_PRESSED      = BIT(1),
+	DEBOUNCE_NOTPRESSED   = BIT(2),
+	DEBOUNCE_WAIT_IRQ     = BIT(3),	/* Stable irq state */
+	DEBOUNCE_POLL         = BIT(4),	/* Stable polling state */
+
+	DEBOUNCE_UNKNOWN =
+		DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
+
+struct gpio_key_state {
+	struct gpio_input_state *ds;
+	uint8_t debounce;
+};
+
+struct gpio_input_state {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_input_info *info;
+	struct hrtimer timer;
+	int use_irq;
+	int debounce_count;
+	spinlock_t irq_lock;
+	struct wake_lock wake_lock;
+	struct gpio_key_state key_state[0];
+};
+
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+	int i;
+	int pressed;
+	struct gpio_input_state *ds =
+		container_of(timer, struct gpio_input_state, timer);
+	unsigned gpio_flags = ds->info->flags;
+	unsigned npolarity;
+	int nkeys = ds->info->keymap_size;
+	const struct gpio_event_direct_entry *key_entry;
+	struct gpio_key_state *key_state;
+	unsigned long irqflags;
+	uint8_t debounce;
+	bool sync_needed;
+
+#if 0
+	key_entry = kp->keys_info->keymap;
+	key_state = kp->key_state;
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+			gpio_read_detect_status(key_entry->gpio));
+#endif
+	key_entry = ds->info->keymap;
+	key_state = ds->key_state;
+	sync_needed = false;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+		debounce = key_state->debounce;
+		if (debounce & DEBOUNCE_WAIT_IRQ)
+			continue;
+		if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+			debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+			enable_irq(gpio_to_irq(key_entry->gpio));
+			pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+				"(%d) continue debounce\n",
+				ds->info->type, key_entry->code,
+				i, key_entry->gpio);
+		}
+		npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+		pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+		if (debounce & DEBOUNCE_POLL) {
+			if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+				ds->debounce_count++;
+				key_state->debounce = DEBOUNCE_UNKNOWN;
+				if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+					pr_info("gpio_keys_scan_keys: key %x-"
+						"%x, %d (%d) start debounce\n",
+						ds->info->type, key_entry->code,
+						i, key_entry->gpio);
+			}
+			continue;
+		}
+		if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 1\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_PRESSED;
+			continue;
+		}
+		if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 0\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_NOTPRESSED;
+			continue;
+		}
+		/* key is stable */
+		ds->debounce_count--;
+		if (ds->use_irq)
+			key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+		else
+			key_state->debounce |= DEBOUNCE_POLL;
+		if (gpio_flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+				"changed to %d\n", ds->info->type,
+				key_entry->code, i, key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		sync_needed = true;
+	}
+	if (sync_needed) {
+		for (i = 0; i < ds->input_devs->count; i++)
+			input_sync(ds->input_devs->dev[i]);
+	}
+
+#if 0
+	key_entry = kp->keys_info->keymap;
+	key_state = kp->key_state;
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+			gpio_read_detect_status(key_entry->gpio));
+	}
+#endif
+
+	if (ds->debounce_count)
+		hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+	else if (!ds->use_irq)
+		hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+	else
+		wake_unlock(&ds->wake_lock);
+
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_key_state *ks = dev_id;
+	struct gpio_input_state *ds = ks->ds;
+	int keymap_index = ks - ds->key_state;
+	const struct gpio_event_direct_entry *key_entry;
+	unsigned long irqflags;
+	int pressed;
+
+	if (!ds->use_irq)
+		return IRQ_HANDLED;
+
+	key_entry = &ds->info->keymap[keymap_index];
+
+	if (ds->info->debounce_time.tv64) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+			ks->debounce = DEBOUNCE_UNKNOWN;
+			if (ds->debounce_count++ == 0) {
+				wake_lock(&ds->wake_lock);
+				hrtimer_start(
+					&ds->timer, ds->info->debounce_time,
+					HRTIMER_MODE_REL);
+			}
+			if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_event_input_irq_handler: "
+					"key %x-%x, %d (%d) start debounce\n",
+					ds->info->type, key_entry->code,
+					keymap_index, key_entry->gpio);
+		} else {
+			disable_irq_nosync(irq);
+			ks->debounce = DEBOUNCE_UNSTABLE;
+		}
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+	} else {
+		pressed = gpio_get_value(key_entry->gpio) ^
+			!(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+		if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+				"(%d) changed to %d\n",
+				ds->info->type, key_entry->code, keymap_index,
+				key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		input_sync(ds->input_devs->dev[key_entry->dev]);
+	}
+	return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+	for (i = 0; i < ds->info->keymap_size; i++) {
+		err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_event_input_irq_handler,
+				  req_flags, "gpio_keys", &ds->key_state[i]);
+		if (err) {
+			pr_err("gpio_event_input_request_irqs: request_irq "
+				"failed for input %d, irq %d\n",
+				ds->info->keymap[i].gpio, irq);
+			goto err_request_irq_failed;
+		}
+		enable_irq_wake(irq);
+	}
+	return 0;
+
+	for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(ds->info->keymap[i].gpio),
+			 &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	unsigned long irqflags;
+	struct gpio_event_input_info *di;
+	struct gpio_input_state *ds = *data;
+
+	di = container_of(info, struct gpio_event_input_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				disable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_cancel(&ds->timer);
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				enable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (ktime_to_ns(di->poll_time) <= 0)
+			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+					di->keymap_size, GFP_KERNEL);
+		if (ds == NULL) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+				"Failed to allocate private data\n");
+			goto err_ds_alloc_failed;
+		}
+		ds->debounce_count = di->keymap_size;
+		ds->input_devs = input_devs;
+		ds->info = di;
+		wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input");
+		spin_lock_init(&ds->irq_lock);
+
+		for (i = 0; i < di->keymap_size; i++) {
+			int dev = di->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_input_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					di->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], di->type,
+					     di->keymap[i].code);
+			ds->key_state[i].ds = ds;
+			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+		}
+
+		for (i = 0; i < di->keymap_size; i++) {
+			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+			if (ret) {
+				pr_err("gpio_event_input_func: gpio_request "
+					"failed for %d\n", di->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_input(di->keymap[i].gpio);
+			if (ret) {
+				pr_err("gpio_event_input_func: "
+					"gpio_direction_input failed for %d\n",
+					di->keymap[i].gpio);
+				goto err_gpio_configure_failed;
+			}
+		}
+
+		ret = gpio_event_input_request_irqs(ds);
+
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		ds->use_irq = ret == 0;
+
+		pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+			"mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			ret == 0 ? "interrupt" : "polling");
+
+		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ds->timer.function = gpio_event_input_timer_func;
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	ret = 0;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	hrtimer_cancel(&ds->timer);
+	if (ds->use_irq) {
+		for (i = di->keymap_size - 1; i >= 0; i--) {
+			free_irq(gpio_to_irq(di->keymap[i].gpio),
+				 &ds->key_state[i]);
+		}
+	}
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+		gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	wake_lock_destroy(&ds->wake_lock);
+	kfree(ds);
+err_ds_alloc_failed:
+	return ret;
+}
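
gpio_event_input_func() is driven by a table of gpio_event_direct_entry
records (gpio, code, dev) plus debounce/poll times and GPIOEDF_* flags.  A
hypothetical entry for two directly wired keys, assuming the
<linux/gpio_event.h> layout implied by the accesses above; note that the
INIT path substitutes a 20 ms poll_time when none is given:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>

static struct gpio_event_direct_entry board_keypad_switch_map[] = {
	{ .gpio = 17, .code = KEY_POWER },	/* board-specific GPIOs */
	{ .gpio = 18, .code = KEY_VOLUMEUP },	/* .dev defaults to device 0 */
};

static struct gpio_event_input_info board_keypad_switch_info = {
	.info.func     = gpio_event_input_func,
	.flags         = 0,		/* keys are active low, no debug prints */
	.type          = EV_KEY,
	.keymap        = board_keypad_switch_map,
	.keymap_size   = ARRAY_SIZE(board_keypad_switch_map),
	/* ktime_t initializer via .tv64, matching how the code reads it */
	.debounce_time = { .tv64 = 5 * NSEC_PER_MSEC },
};
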
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644
index 0000000..eaa9e89
--- /dev/null
+++ b/drivers/input/misc/gpio_matrix.c
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_matrix_info *keypad_info;
+	struct hrtimer timer;
+	struct wake_lock wake_lock;
+	int current_output;
+	unsigned int use_irq:1;
+	unsigned int key_state_changed:1;
+	unsigned int last_key_state_changed:1;
+	unsigned int some_keys_pressed:2;
+	unsigned int disabled_irq:1;
+	unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int key_index = out * mi->ninputs + in;
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+		__clear_bit(key_index, kp->keys_pressed);
+	} else {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"not cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+	}
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+	int rv = 0;
+	int key_index;
+
+	key_index = out * kp->keypad_info->ninputs + in;
+	while (out < kp->keypad_info->noutputs) {
+		if (test_bit(key_index, kp->keys_pressed)) {
+			rv = 1;
+			clear_phantom_key(kp, out, in);
+		}
+		key_index += kp->keypad_info->ninputs;
+		out++;
+	}
+	return rv;
+}
+
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+	int out, in, inp;
+	int key_index;
+
+	if (kp->some_keys_pressed < 3)
+		return;
+
+	for (out = 0; out < kp->keypad_info->noutputs; out++) {
+		inp = -1;
+		key_index = out * kp->keypad_info->ninputs;
+		for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+			if (test_bit(key_index, kp->keys_pressed)) {
+				if (inp == -1) {
+					inp = in;
+					continue;
+				}
+				if (inp >= 0) {
+					if (!restore_keys_for_input(kp, out + 1,
+									inp))
+						break;
+					clear_phantom_key(kp, out, inp);
+					inp = -2;
+				}
+				restore_keys_for_input(kp, out, in);
+			}
+		}
+	}
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int pressed = test_bit(key_index, kp->keys_pressed);
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (keycode == KEY_RESERVED) {
+			if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+				pr_info("gpiomatrix: unmapped key, %d-%d "
+					"(%d-%d) changed to %d\n",
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+		} else {
+			if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+				pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+					"changed to %d\n", keycode,
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+			input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+		}
+	}
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+	int i;
+
+	for (i = 0; i < kp->input_devs->count; i++)
+		input_sync(kp->input_devs->dev[i]);
+}
+
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+	int out, in;
+	int key_index;
+	int gpio;
+	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+	out = kp->current_output;
+	if (out == mi->noutputs) {
+		out = 0;
+		kp->last_key_state_changed = kp->key_state_changed;
+		kp->key_state_changed = 0;
+		kp->some_keys_pressed = 0;
+	} else {
+		key_index = out * mi->ninputs;
+		for (in = 0; in < mi->ninputs; in++, key_index++) {
+			gpio = mi->input_gpios[in];
+			if (gpio_get_value(gpio) ^ !polarity) {
+				if (kp->some_keys_pressed < 3)
+					kp->some_keys_pressed++;
+				kp->key_state_changed |= !__test_and_set_bit(
+						key_index, kp->keys_pressed);
+			} else
+				kp->key_state_changed |= __test_and_clear_bit(
+						key_index, kp->keys_pressed);
+		}
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, !polarity);
+		else
+			gpio_direction_input(gpio);
+		out++;
+	}
+	kp->current_output = out;
+	if (out < mi->noutputs) {
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, polarity);
+		else
+			gpio_direction_output(gpio, polarity);
+		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+		if (kp->key_state_changed) {
+			hrtimer_start(&kp->timer, mi->debounce_delay,
+				      HRTIMER_MODE_REL);
+			return HRTIMER_NORESTART;
+		}
+		kp->key_state_changed = kp->last_key_state_changed;
+	}
+	if (kp->key_state_changed) {
+		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+			remove_phantom_keys(kp);
+		key_index = 0;
+		for (out = 0; out < mi->noutputs; out++)
+			for (in = 0; in < mi->ninputs; in++, key_index++)
+				report_key(kp, key_index, out, in);
+		report_sync(kp);
+	}
+	if (!kp->use_irq || kp->some_keys_pressed) {
+		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+
+	/* No keys are pressed, reenable interrupt */
+	for (out = 0; out < mi->noutputs; out++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[out], polarity);
+		else
+			gpio_direction_output(mi->output_gpios[out], polarity);
+	}
+	for (in = 0; in < mi->ninputs; in++)
+		enable_irq(gpio_to_irq(mi->input_gpios[in]));
+	wake_unlock(&kp->wake_lock);
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+	int i;
+	struct gpio_kp *kp = dev_id;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+
+	if (!kp->use_irq) {
+		/* ignore interrupt while registering the handler */
+		kp->disabled_irq = 1;
+		disable_irq_nosync(irq_in);
+		return IRQ_HANDLED;
+	}
+
+	for (i = 0; i < mi->ninputs; i++)
+		disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+	for (i = 0; i < mi->noutputs; i++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[i],
+				!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+		else
+			gpio_direction_input(mi->output_gpios[i]);
+	}
+	wake_lock(&kp->wake_lock);
+	hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+	return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long request_flags;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+	switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+	default:
+		request_flags = IRQF_TRIGGER_FALLING;
+		break;
+	case GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_RISING;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+		request_flags = IRQF_TRIGGER_LOW;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_HIGH;
+		break;
+	}
+
+	for (i = 0; i < mi->ninputs; i++) {
+		err = irq = gpio_to_irq(mi->input_gpios[i]);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+				  "gpio_kp", kp);
+		if (err) {
+			pr_err("gpiomatrix: request_irq failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+			goto err_request_irq_failed;
+		}
+		err = enable_irq_wake(irq);
+		if (err) {
+			pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+		}
+		disable_irq(irq);
+		if (kp->disabled_irq) {
+			kp->disabled_irq = 0;
+			enable_irq(irq);
+		}
+	}
+	return 0;
+
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+	struct gpio_event_info *info, void **data, int func)
+{
+	int i;
+	int err;
+	int key_count;
+	struct gpio_kp *kp;
+	struct gpio_event_matrix_info *mi;
+
+	mi = container_of(info, struct gpio_event_matrix_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+		/* TODO: disable scanning */
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (mi->keymap == NULL ||
+		   mi->input_gpios == NULL ||
+		   mi->output_gpios == NULL) {
+			err = -ENODEV;
+			pr_err("gpiomatrix: Incomplete pdata\n");
+			goto err_invalid_platform_data;
+		}
+		key_count = mi->ninputs * mi->noutputs;
+
+		*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+				     BITS_TO_LONGS(key_count), GFP_KERNEL);
+		if (kp == NULL) {
+			err = -ENOMEM;
+			pr_err("gpiomatrix: Failed to allocate private data\n");
+			goto err_kp_alloc_failed;
+		}
+		kp->input_devs = input_devs;
+		kp->keypad_info = mi;
+		for (i = 0; i < key_count; i++) {
+			unsigned short keyentry = mi->keymap[i];
+			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+			unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+			if (dev >= input_devs->count) {
+				pr_err("gpiomatrix: bad device index %d >= "
+					"%d for key code %d\n",
+					dev, input_devs->count, keycode);
+				err = -EINVAL;
+				goto err_bad_keymap;
+			}
+			if (keycode && keycode <= KEY_MAX)
+				input_set_capability(input_devs->dev[dev],
+							EV_KEY, keycode);
+		}
+
+		for (i = 0; i < mi->noutputs; i++) {
+			err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_request_output_gpio_failed;
+			}
+			if (gpio_cansleep(mi->output_gpios[i])) {
+				pr_err("gpiomatrix: unsupported output gpio %d,"
+					" can sleep\n", mi->output_gpios[i]);
+				err = -EINVAL;
+				goto err_output_gpio_configure_failed;
+			}
+			if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+				err = gpio_direction_output(mi->output_gpios[i],
+					!(mi->flags & GPIOKPF_ACTIVE_HIGH));
+			else
+				err = gpio_direction_input(mi->output_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_configure failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_output_gpio_configure_failed;
+			}
+		}
+		for (i = 0; i < mi->ninputs; i++) {
+			err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"input %d\n", mi->input_gpios[i]);
+				goto err_request_input_gpio_failed;
+			}
+			err = gpio_direction_input(mi->input_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_direction_input failed"
+					" for input %d\n", mi->input_gpios[i]);
+				goto err_gpio_direction_input_failed;
+			}
+		}
+		kp->current_output = mi->noutputs;
+		kp->key_state_changed = 1;
+
+		hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		kp->timer.function = gpio_keypad_timer_func;
+		wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+		err = gpio_keypad_request_irqs(kp);
+		kp->use_irq = err == 0;
+
+		pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+			"%s%s in %s mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			kp->use_irq ? "interrupt" : "polling");
+
+		if (kp->use_irq)
+			wake_lock(&kp->wake_lock);
+		hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+		return 0;
+	}
+
+	err = 0;
+	kp = *data;
+
+	if (kp->use_irq)
+		for (i = mi->ninputs - 1; i >= 0; i--)
+			free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+	hrtimer_cancel(&kp->timer);
+	wake_lock_destroy(&kp->wake_lock);
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+		gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+		;
+	}
+	for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+		gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+		;
+	}
+err_bad_keymap:
+	kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+	return err;
+}
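
gpio_event_matrix_func() scans a keypad described by arrays of output
(driven) and input (sensed) GPIOs plus a keymap of noutputs * ninputs
entries, each holding the keycode in the low bits and the input-device
index in the high bits.  A hypothetical 2x2 example; the ktime_t
initializers and exact member types are assumptions, while the GPIOKPF_*
flags are the ones referenced in the code above:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>

static unsigned int board_kp_row_gpios[] = { 32, 33 };	/* driven outputs */
static unsigned int board_kp_col_gpios[] = { 40, 41 };	/* sensed inputs */

/* device index 0, plain keycodes, laid out as out * ninputs + in */
static unsigned short board_kp_keymap[] = {
	KEY_HOME, KEY_BACK,	/* row 0 */
	KEY_MENU, KEY_SEARCH,	/* row 1 */
};

static struct gpio_event_matrix_info board_kp_matrix_info = {
	.info.func      = gpio_event_matrix_func,
	.keymap         = board_kp_keymap,
	.output_gpios   = board_kp_row_gpios,
	.input_gpios    = board_kp_col_gpios,
	.noutputs       = ARRAY_SIZE(board_kp_row_gpios),
	.ninputs        = ARRAY_SIZE(board_kp_col_gpios),
	.settle_time    = { .tv64 = 40 * NSEC_PER_USEC },
	.poll_time      = { .tv64 = 20 * NSEC_PER_MSEC },
	.debounce_delay = { .tv64 = 5 * NSEC_PER_MSEC },
	.flags          = GPIOKPF_DEBOUNCE | GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
			  GPIOKPF_DRIVE_INACTIVE,
};
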
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644
index 0000000..2aac2fa
--- /dev/null
+++ b/drivers/input/misc/gpio_output.c
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+int gpio_event_output_event(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, unsigned int dev, unsigned int type,
+	unsigned int code, int value)
+{
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+	if (type != oi->type)
+		return 0;
+	if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+		value = !value;
+	for (i = 0; i < oi->keymap_size; i++)
+		if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+			gpio_set_value(oi->keymap[i].gpio, value);
+	return 0;
+}
+
+int gpio_event_output_func(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, int func)
+{
+	int ret;
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+		return 0;
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			int dev = oi->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_output_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					oi->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], oi->type,
+					     oi->keymap[i].code);
+		}
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			ret = gpio_request(oi->keymap[i].gpio,
+					   "gpio_event_output");
+			if (ret) {
+				pr_err("gpio_event_output_func: gpio_request "
+					"failed for %d\n", oi->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_output(oi->keymap[i].gpio,
+						    output_level);
+			if (ret) {
+				pr_err("gpio_event_output_func: "
+					"gpio_direction_output failed for %d\n",
+					oi->keymap[i].gpio);
+				goto err_gpio_direction_output_failed;
+			}
+		}
+		return 0;
+	}
+
+	ret = 0;
+	for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+		gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	return ret;
+}
+
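
Output entries reuse the gpio_event_direct_entry keymap format.  Both hooks
matter here: .info.func handles init/uninit of the GPIOs, and .info.event
must point at gpio_event_output_event so that gpio_input_event() in
gpio_event.c forwards EV_LED/EV_SND style writes to the mapped pins.  A
hypothetical LED mapping, with the <linux/gpio_event.h> layout assumed:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>

static struct gpio_event_direct_entry board_led_map[] = {
	{ .gpio = 57, .code = LED_MISC },	/* board-specific GPIO */
};

static struct gpio_event_output_info board_led_output_info = {
	.info.func   = gpio_event_output_func,
	.info.event  = gpio_event_output_event,
	.flags       = GPIOEDF_ACTIVE_HIGH,	/* drive the GPIO high for "on" */
	.type        = EV_LED,
	.keymap      = board_led_map,
	.keymap_size = ARRAY_SIZE(board_led_map),
};
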
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644
index 0000000..3ffab6d
--- /dev/null
+++ b/drivers/input/misc/keychord.c
@@ -0,0 +1,387 @@
+/*
+ *  drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME		"keychord"
+#define BUFFER_SIZE			16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+		((char *)kc + sizeof(struct input_keychord) + \
+		kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+	struct input_handler	input_handler;
+	int			registered;
+
+	/* list of keychords to monitor */
+	struct input_keychord	*keychords;
+	int			keychord_count;
+
+	/* bitmask of keys contained in our keychords */
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	/* current state of the keys */
+	unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+	/* number of keys that are currently pressed */
+	int key_down;
+
+	/* second input_device_id is needed for null termination */
+	struct input_device_id  device_ids[2];
+
+	spinlock_t		lock;
+	wait_queue_head_t	waitq;
+	unsigned char		head;
+	unsigned char		tail;
+	__u16			buff[BUFFER_SIZE];
+};
+
+static int check_keychord(struct keychord_device *kdev,
+		struct input_keychord *keychord)
+{
+	int i;
+
+	if (keychord->count != kdev->key_down)
+		return 0;
+
+	for (i = 0; i < keychord->count; i++) {
+		if (!test_bit(keychord->keycodes[i], kdev->keystate))
+			return 0;
+	}
+
+	/* we have a match */
+	return 1;
+}
+
+static void keychord_event(struct input_handle *handle, unsigned int type,
+			   unsigned int code, int value)
+{
+	struct keychord_device *kdev = handle->private;
+	struct input_keychord *keychord;
+	unsigned long flags;
+	int i, got_chord = 0;
+
+	if (type != EV_KEY || code >= KEY_MAX)
+		return;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* do nothing if key state did not change */
+	if (!test_bit(code, kdev->keystate) == !value)
+		goto done;
+	__change_bit(code, kdev->keystate);
+	if (value)
+		kdev->key_down++;
+	else
+		kdev->key_down--;
+
+	/* don't notify on key up */
+	if (!value)
+		goto done;
+	/* ignore this event if it is not one of the keys we are monitoring */
+	if (!test_bit(code, kdev->keybit))
+		goto done;
+
+	keychord = kdev->keychords;
+	if (!keychord)
+		goto done;
+
+	/* check to see if the keyboard state matches any keychords */
+	for (i = 0; i < kdev->keychord_count; i++) {
+		if (check_keychord(kdev, keychord)) {
+			kdev->buff[kdev->head] = keychord->id;
+			kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+			got_chord = 1;
+			break;
+		}
+		/* skip to next keychord */
+		keychord = NEXT_KEYCHORD(keychord);
+	}
+
+done:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	if (got_chord)
+		wake_up_interruptible(&kdev->waitq);
+}
+
+static int keychord_connect(struct input_handler *handler,
+					  struct input_dev *dev,
+					  const struct input_device_id *id)
+{
+	int i, ret;
+	struct input_handle *handle;
+	struct keychord_device *kdev =
+		container_of(handler, struct keychord_device, input_handler);
+
+	/*
+	 * ignore this input device if it does not contain any keycodes
+	 * that we are monitoring
+	 */
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCHORD_NAME;
+	handle->private = kdev;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	__u16   id;
+	int retval;
+	unsigned long flags;
+
+	if (count < sizeof(id))
+		return -EINVAL;
+	count = sizeof(id);
+
+	if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	retval = wait_event_interruptible(kdev->waitq,
+			kdev->head != kdev->tail);
+	if (retval)
+		return retval;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* pop a keychord ID off the queue */
+	id = kdev->buff[kdev->tail];
+	kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	if (copy_to_user(buffer, &id, count))
+		return -EFAULT;
+
+	return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	struct input_keychord *keychords = 0;
+	struct input_keychord *keychord, *next, *end;
+	int ret, i, key;
+	unsigned long flags;
+
+	if (count < sizeof(struct input_keychord))
+		return -EINVAL;
+	keychords = kzalloc(count, GFP_KERNEL);
+	if (!keychords)
+		return -ENOMEM;
+
+	/* read list of keychords from userspace */
+	if (copy_from_user(keychords, buffer, count)) {
+		kfree(keychords);
+		return -EFAULT;
+	}
+
+	/* unregister handler before changing configuration */
+	if (kdev->registered) {
+		input_unregister_handler(&kdev->input_handler);
+		kdev->registered = 0;
+	}
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* clear any existing configuration */
+	kfree(kdev->keychords);
+	kdev->keychords = NULL;
+	kdev->keychord_count = 0;
+	kdev->key_down = 0;
+	memset(kdev->keybit, 0, sizeof(kdev->keybit));
+	memset(kdev->keystate, 0, sizeof(kdev->keystate));
+	kdev->head = kdev->tail = 0;
+
+	keychord = keychords;
+	end = (struct input_keychord *)((char *)keychord + count);
+
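+	/* keychords is a packed list of variable-length entries; validate each one */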
+	while (keychord < end) {
+		next = NEXT_KEYCHORD(keychord);
+		if (keychord->count <= 0 || next > end) {
+			pr_err("keychord: invalid keycode count %d\n",
+				keychord->count);
+			goto err_unlock_return;
+		}
+		if (keychord->version != KEYCHORD_VERSION) {
+			pr_err("keychord: unsupported version %d\n",
+				keychord->version);
+			goto err_unlock_return;
+		}
+
+		/* keep track of the keys we are monitoring in keybit */
+		for (i = 0; i < keychord->count; i++) {
+			key = keychord->keycodes[i];
+			if (key < 0 || key >= KEY_CNT) {
+				pr_err("keychord: keycode %d out of range\n",
+					key);
+				goto err_unlock_return;
+			}
+			__set_bit(key, kdev->keybit);
+		}
+
+		kdev->keychord_count++;
+		keychord = next;
+	}
+
+	kdev->keychords = keychords;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	ret = input_register_handler(&kdev->input_handler);
+	if (ret) {
+		kfree(keychords);
+		kdev->keychords = NULL;
+		return ret;
+	}
+	kdev->registered = 1;
+
+	return count;
+
+err_unlock_return:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	kfree(keychords);
+	return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+	struct keychord_device *kdev = file->private_data;
+
+	poll_wait(file, &kdev->waitq, wait);
+
+	if (kdev->head != kdev->tail)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev;
+
+	kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+	if (!kdev)
+		return -ENOMEM;
+
+	spin_lock_init(&kdev->lock);
+	init_waitqueue_head(&kdev->waitq);
+
+	kdev->input_handler.event = keychord_event;
+	kdev->input_handler.connect = keychord_connect;
+	kdev->input_handler.disconnect = keychord_disconnect;
+	kdev->input_handler.name = KEYCHORD_NAME;
+	kdev->input_handler.id_table = kdev->device_ids;
+
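+	/* match any input device that reports EV_KEY events */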
+	kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+	__set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+	file->private_data = kdev;
+
+	return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev = file->private_data;
+
+	if (kdev->registered)
+		input_unregister_handler(&kdev->input_handler);
+	kfree(kdev);
+
+	return 0;
+}
+
+static const struct file_operations keychord_fops = {
+	.owner		= THIS_MODULE,
+	.open		= keychord_open,
+	.release	= keychord_release,
+	.read		= keychord_read,
+	.write		= keychord_write,
+	.poll		= keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+	.fops		= &keychord_fops,
+	.name		= KEYCHORD_NAME,
+	.minor		= MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+	return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+	misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index cabd9e5..4104103 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -383,6 +383,12 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called tnetv107x-ts.
 
+config TOUCHSCREEN_SYNAPTICS_I2C_RMI
+	tristate "Synaptics i2c touchscreen"
+	depends on I2C
+	help
+	  This enables support for Synaptics RMI over I2C based touchscreens.
+
 config TOUCHSCREEN_TOUCHRIGHT
 	tristate "Touchright serial touchscreen"
 	select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 282d6f7..0738f19 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -43,6 +43,7 @@
 obj-$(CONFIG_TOUCHSCREEN_ST1232)	+= st1232.o
 obj-$(CONFIG_TOUCHSCREEN_STMPE)		+= stmpe-ts.o
 obj-$(CONFIG_TOUCHSCREEN_TNETV107X)	+= tnetv107x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI)	+= synaptics_i2c_rmi.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213)	+= touchit213.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT)	+= touchright.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN)	+= touchwin.o
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c
new file mode 100644
index 0000000..5729602
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c
@@ -0,0 +1,675 @@
+/* drivers/input/touchscreen/synaptics_i2c_rmi.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/synaptics_i2c_rmi.h>
+
+static struct workqueue_struct *synaptics_wq;
+
+struct synaptics_ts_data {
+	uint16_t addr;
+	struct i2c_client *client;
+	struct input_dev *input_dev;
+	int use_irq;
+	bool has_relative_report;
+	struct hrtimer timer;
+	struct work_struct  work;
+	uint16_t max[2];
+	int snap_state[2][2];
+	int snap_down_on[2];
+	int snap_down_off[2];
+	int snap_up_on[2];
+	int snap_up_off[2];
+	int snap_down[2];
+	int snap_up[2];
+	uint32_t flags;
+	int reported_finger_count;
+	int8_t sensitivity_adjust;
+	int (*power)(int on);
+	struct early_suspend early_suspend;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h);
+static void synaptics_ts_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_init_panel(struct synaptics_ts_data *ts)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+		goto err_page_select_failed;
+	}
+	ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */
+	if (ret < 0)
+		printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n");
+
+	ret = i2c_smbus_write_byte_data(ts->client, 0x44,
+					ts->sensitivity_adjust);
+	if (ret < 0)
+		pr_err("synaptics_ts: failed to set Sensitivity Adjust\n");
+
+err_page_select_failed:
+	ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */
+	if (ret < 0)
+		printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+	ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */
+	if (ret < 0)
+		printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n");
+	return ret;
+}
+
+static void synaptics_ts_work_func(struct work_struct *work)
+{
+	int i;
+	int ret;
+	int bad_data = 0;
+	struct i2c_msg msg[2];
+	uint8_t start_reg;
+	uint8_t buf[15];
+	struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
+	int buf_len = ts->has_relative_report ? 15 : 13;
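+	/* frame ends with a status byte; panels with relative reporting insert
+	 * two extra bytes (dx/dy) before it, giving 15 bytes instead of 13 */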
+
+	msg[0].addr = ts->client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &start_reg;
+	start_reg = 0x00;
+	msg[1].addr = ts->client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = buf_len;
+	msg[1].buf = buf;
+
+	/* printk("synaptics_ts_work_func\n"); */
+	for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) {
+		ret = i2c_transfer(ts->client->adapter, msg, 2);
+		if (ret < 0) {
+			printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n");
+			bad_data = 1;
+		} else {
+			/* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */
+			/*        " %x %x %x %x %x %x %x %x %x, ret %d\n", */
+			/*        buf[0], buf[1], buf[2], buf[3], */
+			/*        buf[4], buf[5], buf[6], buf[7], */
+			/*        buf[8], buf[9], buf[10], buf[11], */
+			/*        buf[12], buf[13], buf[14], ret); */
+			if ((buf[buf_len - 1] & 0xc0) != 0x40) {
+				printk(KERN_WARNING "synaptics_ts_work_func:"
+				       " bad read %x %x %x %x %x %x %x %x %x"
+				       " %x %x %x %x %x %x, ret %d\n",
+				       buf[0], buf[1], buf[2], buf[3],
+				       buf[4], buf[5], buf[6], buf[7],
+				       buf[8], buf[9], buf[10], buf[11],
+				       buf[12], buf[13], buf[14], ret);
+				if (bad_data)
+					synaptics_init_panel(ts);
+				bad_data = 1;
+				continue;
+			}
+			bad_data = 0;
+			if ((buf[buf_len - 1] & 1) == 0) {
+				/* printk("read %d coordinates\n", i); */
+				break;
+			} else {
+				int pos[2][2];
+				int f, a;
+				int base;
+				/* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */
+				/* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */
+				int z = buf[1];
+				int w = buf[0] >> 4;
+				int finger = buf[0] & 7;
+
+				/* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */
+				/* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */
+				/* int z2 = buf[1+6]; */
+				/* int w2 = buf[0+6] >> 4; */
+				/* int finger2 = buf[0+6] & 7; */
+
+				/* int dx = (int8_t)buf[12]; */
+				/* int dy = (int8_t)buf[13]; */
+				int finger2_pressed;
+
+				/* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */
+				/*	x, y, z, w, finger, */
+				/*	x2, y2, z2, w2, finger2, */
+				/*	dx, dy); */
+
+				base = 2;
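+				/* decode up to two fingers; each coordinate is 13 bits split across two bytes */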
+				for (f = 0; f < 2; f++) {
+					uint32_t flip_flag = SYNAPTICS_FLIP_X;
+					for (a = 0; a < 2; a++) {
+						int p = buf[base + 1];
+						p |= (uint16_t)(buf[base] & 0x1f) << 8;
+						if (ts->flags & flip_flag)
+							p = ts->max[a] - p;
+						if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) {
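+							/* hysteresis: stay snapped to the edge until the 'off' threshold is crossed */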
+							if (ts->snap_state[f][a]) {
+								if (p <= ts->snap_down_off[a])
+									p = ts->snap_down[a];
+								else if (p >= ts->snap_up_off[a])
+									p = ts->snap_up[a];
+								else
+									ts->snap_state[f][a] = 0;
+							} else {
+								if (p <= ts->snap_down_on[a]) {
+									p = ts->snap_down[a];
+									ts->snap_state[f][a] = 1;
+								} else if (p >= ts->snap_up_on[a]) {
+									p = ts->snap_up[a];
+									ts->snap_state[f][a] = 1;
+								}
+							}
+						}
+						pos[f][a] = p;
+						base += 2;
+						flip_flag <<= 1;
+					}
+					base += 2;
+					if (ts->flags & SYNAPTICS_SWAP_XY)
+						swap(pos[f][0], pos[f][1]);
+				}
+				if (z) {
+					input_report_abs(ts->input_dev, ABS_X, pos[0][0]);
+					input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
+				}
+				input_report_abs(ts->input_dev, ABS_PRESSURE, z);
+				input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
+				input_report_key(ts->input_dev, BTN_TOUCH, finger);
+				finger2_pressed = finger > 1 && finger != 7;
+				input_report_key(ts->input_dev, BTN_2, finger2_pressed);
+				if (finger2_pressed) {
+					input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]);
+					input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]);
+				}
+
+				if (!finger)
+					z = 0;
+				input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+				input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+				input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]);
+				input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]);
+				input_mt_sync(ts->input_dev);
+				if (finger2_pressed) {
+					input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+					input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+					input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]);
+					input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]);
+					input_mt_sync(ts->input_dev);
+				} else if (ts->reported_finger_count > 1) {
+					input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+					input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+					input_mt_sync(ts->input_dev);
+				}
+				ts->reported_finger_count = finger;
+				input_sync(ts->input_dev);
+			}
+		}
+	}
+	if (ts->use_irq)
+		enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
+{
+	struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
+	/* printk("synaptics_ts_timer_func\n"); */
+
+	queue_work(synaptics_wq, &ts->work);
+
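+	/* polling mode: re-arm the timer for 12.5 ms (about 80 reads per second) */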
+	hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
+{
+	struct synaptics_ts_data *ts = dev_id;
+
+	/* printk("synaptics_ts_irq_handler\n"); */
+	disable_irq_nosync(ts->client->irq);
+	queue_work(synaptics_wq, &ts->work);
+	return IRQ_HANDLED;
+}
+
+static int synaptics_ts_probe(
+	struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct synaptics_ts_data *ts;
+	uint8_t buf0[4];
+	uint8_t buf1[8];
+	struct i2c_msg msg[2];
+	int ret = 0;
+	uint16_t max_x, max_y;
+	int fuzz_x, fuzz_y, fuzz_p, fuzz_w;
+	struct synaptics_i2c_rmi_platform_data *pdata;
+	unsigned long irqflags;
+	int inactive_area_left;
+	int inactive_area_right;
+	int inactive_area_top;
+	int inactive_area_bottom;
+	int snap_left_on;
+	int snap_left_off;
+	int snap_right_on;
+	int snap_right_off;
+	int snap_top_on;
+	int snap_top_off;
+	int snap_bottom_on;
+	int snap_bottom_off;
+	uint32_t panel_version;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n");
+		ret = -ENODEV;
+		goto err_check_functionality_failed;
+	}
+
+	ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+	if (ts == NULL) {
+		ret = -ENOMEM;
+		goto err_alloc_data_failed;
+	}
+	INIT_WORK(&ts->work, synaptics_ts_work_func);
+	ts->client = client;
+	i2c_set_clientdata(client, ts);
+	pdata = client->dev.platform_data;
+	if (pdata)
+		ts->power = pdata->power;
+	if (ts->power) {
+		ret = ts->power(1);
+		if (ret < 0) {
+			printk(KERN_ERR "synaptics_ts_probe power on failed\n");
+			goto err_power_failed;
+		}
+	}
+
+	ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+		/* fail? */
+	}
+	{
+		int retry = 10;
+		while (retry-- > 0) {
+			ret = i2c_smbus_read_byte_data(ts->client, 0xe4);
+			if (ret >= 0)
+				break;
+			msleep(100);
+		}
+	}
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+		goto err_detect_failed;
+	}
+	printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret);
+	panel_version = ret << 8;
+	ret = i2c_smbus_read_byte_data(ts->client, 0xe5);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+		goto err_detect_failed;
+	}
+	printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret);
+	panel_version |= ret;
+
+	ret = i2c_smbus_read_byte_data(ts->client, 0xe3);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+		goto err_detect_failed;
+	}
+	printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret);
+
+	if (pdata) {
+		while (pdata->version > panel_version)
+			pdata++;
+		ts->flags = pdata->flags;
+		ts->sensitivity_adjust = pdata->sensitivity_adjust;
+		irqflags = pdata->irqflags;
+		inactive_area_left = pdata->inactive_left;
+		inactive_area_right = pdata->inactive_right;
+		inactive_area_top = pdata->inactive_top;
+		inactive_area_bottom = pdata->inactive_bottom;
+		snap_left_on = pdata->snap_left_on;
+		snap_left_off = pdata->snap_left_off;
+		snap_right_on = pdata->snap_right_on;
+		snap_right_off = pdata->snap_right_off;
+		snap_top_on = pdata->snap_top_on;
+		snap_top_off = pdata->snap_top_off;
+		snap_bottom_on = pdata->snap_bottom_on;
+		snap_bottom_off = pdata->snap_bottom_off;
+		fuzz_x = pdata->fuzz_x;
+		fuzz_y = pdata->fuzz_y;
+		fuzz_p = pdata->fuzz_p;
+		fuzz_w = pdata->fuzz_w;
+	} else {
+		irqflags = 0;
+		inactive_area_left = 0;
+		inactive_area_right = 0;
+		inactive_area_top = 0;
+		inactive_area_bottom = 0;
+		snap_left_on = 0;
+		snap_left_off = 0;
+		snap_right_on = 0;
+		snap_right_off = 0;
+		snap_top_on = 0;
+		snap_top_off = 0;
+		snap_bottom_on = 0;
+		snap_bottom_off = 0;
+		fuzz_x = 0;
+		fuzz_y = 0;
+		fuzz_p = 0;
+		fuzz_w = 0;
+	}
+
+	ret = i2c_smbus_read_byte_data(ts->client, 0xf0);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+		goto err_detect_failed;
+	}
+	printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret);
+
+	ret = i2c_smbus_read_byte_data(ts->client, 0xf1);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+		goto err_detect_failed;
+	}
+	printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret);
+
+	ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+		goto err_detect_failed;
+	}
+
+	msg[0].addr = ts->client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = buf0;
+	buf0[0] = 0xe0;
+	msg[1].addr = ts->client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = 8;
+	msg[1].buf = buf1;
+	ret = i2c_transfer(ts->client->adapter, msg, 2);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_transfer failed\n");
+		goto err_detect_failed;
+	}
+	printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n",
+	       buf1[0], buf1[1], buf1[2], buf1[3],
+	       buf1[4], buf1[5], buf1[6], buf1[7]);
+
+	ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+		goto err_detect_failed;
+	}
+	ret = i2c_smbus_read_word_data(ts->client, 0x02);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+		goto err_detect_failed;
+	}
+	ts->has_relative_report = !(ret & 0x100);
+	printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret);
+	ret = i2c_smbus_read_word_data(ts->client, 0x04);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+		goto err_detect_failed;
+	}
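+	/* the 13-bit maximum is split across the word: swap the bytes and mask to recombine it */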
+	ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+	ret = i2c_smbus_read_word_data(ts->client, 0x06);
+	if (ret < 0) {
+		printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+		goto err_detect_failed;
+	}
+	ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+	if (ts->flags & SYNAPTICS_SWAP_XY)
+		swap(max_x, max_y);
+
+	ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */
+	if (ret < 0) {
+		printk(KERN_ERR "synaptics_init_panel failed\n");
+		goto err_detect_failed;
+	}
+
+	ts->input_dev = input_allocate_device();
+	if (ts->input_dev == NULL) {
+		ret = -ENOMEM;
+		printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n");
+		goto err_input_dev_alloc_failed;
+	}
+	ts->input_dev->name = "synaptics-rmi-touchscreen";
+	set_bit(EV_SYN, ts->input_dev->evbit);
+	set_bit(EV_KEY, ts->input_dev->evbit);
+	set_bit(BTN_TOUCH, ts->input_dev->keybit);
+	set_bit(BTN_2, ts->input_dev->keybit);
+	set_bit(EV_ABS, ts->input_dev->evbit);
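+	/* inactive borders, snap zones and fuzz arrive as fractions of 0x10000; scale them to panel units */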
+	inactive_area_left = inactive_area_left * max_x / 0x10000;
+	inactive_area_right = inactive_area_right * max_x / 0x10000;
+	inactive_area_top = inactive_area_top * max_y / 0x10000;
+	inactive_area_bottom = inactive_area_bottom * max_y / 0x10000;
+	snap_left_on = snap_left_on * max_x / 0x10000;
+	snap_left_off = snap_left_off * max_x / 0x10000;
+	snap_right_on = snap_right_on * max_x / 0x10000;
+	snap_right_off = snap_right_off * max_x / 0x10000;
+	snap_top_on = snap_top_on * max_y / 0x10000;
+	snap_top_off = snap_top_off * max_y / 0x10000;
+	snap_bottom_on = snap_bottom_on * max_y / 0x10000;
+	snap_bottom_off = snap_bottom_off * max_y / 0x10000;
+	fuzz_x = fuzz_x * max_x / 0x10000;
+	fuzz_y = fuzz_y * max_y / 0x10000;
+	ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left;
+	ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right;
+	ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top;
+	ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom;
+	ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on;
+	ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off;
+	ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on;
+	ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off;
+	ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on;
+	ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off;
+	ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on;
+	ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off;
+	printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y);
+	printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n",
+	       inactive_area_left, inactive_area_right,
+	       inactive_area_top, inactive_area_bottom);
+	printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n",
+	       snap_left_on, snap_left_off, snap_right_on, snap_right_off,
+	       snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off);
+	input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+	input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+	input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
+	input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
+	input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+	input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0);
+	input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0);
+	/* ts->input_dev->name = ts->keypad_info->name; */
+	ret = input_register_device(ts->input_dev);
+	if (ret) {
+		printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name);
+		goto err_input_register_device_failed;
+	}
+	if (client->irq) {
+		ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts);
+		if (ret == 0) {
+			ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+			if (ret)
+				free_irq(client->irq, ts);
+		}
+		if (ret == 0)
+			ts->use_irq = 1;
+		else
+			dev_err(&client->dev, "request_irq failed\n");
+	}
+	if (!ts->use_irq) {
+		hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ts->timer.function = synaptics_ts_timer_func;
+		hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+	}
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	ts->early_suspend.suspend = synaptics_ts_early_suspend;
+	ts->early_suspend.resume = synaptics_ts_late_resume;
+	register_early_suspend(&ts->early_suspend);
+#endif
+
+	printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? "interrupt" : "polling");
+
+	return 0;
+
+err_input_register_device_failed:
+	input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+err_detect_failed:
+err_power_failed:
+	kfree(ts);
+err_alloc_data_failed:
+err_check_functionality_failed:
+	return ret;
+}
+
+static int synaptics_ts_remove(struct i2c_client *client)
+{
+	struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+	unregister_early_suspend(&ts->early_suspend);
+	if (ts->use_irq)
+		free_irq(client->irq, ts);
+	else
+		hrtimer_cancel(&ts->timer);
+	input_unregister_device(ts->input_dev);
+	kfree(ts);
+	return 0;
+}
+
+static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	int ret;
+	struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+	if (ts->use_irq)
+		disable_irq(client->irq);
+	else
+		hrtimer_cancel(&ts->timer);
+	ret = cancel_work_sync(&ts->work);
+	if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */
+		enable_irq(client->irq);
+	ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+	if (ret < 0)
+		printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+
+	ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */
+	if (ret < 0)
+		printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+	if (ts->power) {
+		ret = ts->power(0);
+		if (ret < 0)
+			printk(KERN_ERR "synaptics_ts_resume power off failed\n");
+	}
+	return 0;
+}
+
+static int synaptics_ts_resume(struct i2c_client *client)
+{
+	int ret;
+	struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+	if (ts->power) {
+		ret = ts->power(1);
+		if (ret < 0)
+			printk(KERN_ERR "synaptics_ts_resume power on failed\n");
+	}
+
+	synaptics_init_panel(ts);
+
+	if (ts->use_irq)
+		enable_irq(client->irq);
+
+	if (!ts->use_irq)
+		hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+	else
+		i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h)
+{
+	struct synaptics_ts_data *ts;
+	ts = container_of(h, struct synaptics_ts_data, early_suspend);
+	synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void synaptics_ts_late_resume(struct early_suspend *h)
+{
+	struct synaptics_ts_data *ts;
+	ts = container_of(h, struct synaptics_ts_data, early_suspend);
+	synaptics_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id synaptics_ts_id[] = {
+	{ SYNAPTICS_I2C_RMI_NAME, 0 },
+	{ }
+};
+
+static struct i2c_driver synaptics_ts_driver = {
+	.probe		= synaptics_ts_probe,
+	.remove		= synaptics_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend	= synaptics_ts_suspend,
+	.resume		= synaptics_ts_resume,
+#endif
+	.id_table	= synaptics_ts_id,
+	.driver = {
+		.name	= SYNAPTICS_I2C_RMI_NAME,
+	},
+};
+
+static int __init synaptics_ts_init(void)
+{
+	int ret;
+
+	synaptics_wq = create_singlethread_workqueue("synaptics_wq");
+	if (!synaptics_wq)
+		return -ENOMEM;
+	ret = i2c_add_driver(&synaptics_ts_driver);
+	if (ret)
+		destroy_workqueue(synaptics_wq);
+	return ret;
+}
+
+static void __exit synaptics_ts_exit(void)
+{
+	i2c_del_driver(&synaptics_ts_driver);
+	if (synaptics_wq)
+		destroy_workqueue(synaptics_wq);
+}
+
+module_init(synaptics_ts_init);
+module_exit(synaptics_ts_exit);
+
+MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 713d43b..b84e46b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -466,6 +466,12 @@
 	  This allows LEDs to be initialised in the ON state.
 	  If unsure, say Y.
 
+config LEDS_TRIGGER_SLEEP
+	tristate "LED Sleep Mode Trigger"
+	depends on LEDS_TRIGGERS && HAS_EARLYSUSPEND
+	help
+	  This turns LEDs on when the screen is off but the CPU is still running.
+
 comment "iptables trigger is under Netfilter config (LED target)"
 	depends on LEDS_TRIGGERS
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index bbfd2e3..cb77b9b 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -54,3 +54,4 @@
 obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT)	+= ledtrig-backlight.o
 obj-$(CONFIG_LEDS_TRIGGER_GPIO)		+= ledtrig-gpio.o
 obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON)	+= ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_SLEEP)	+= ledtrig-sleep.o
diff --git a/drivers/leds/ledtrig-sleep.c b/drivers/leds/ledtrig-sleep.c
new file mode 100644
index 0000000..f164042
--- /dev/null
+++ b/drivers/leds/ledtrig-sleep.c
@@ -0,0 +1,80 @@
+/* drivers/leds/ledtrig-sleep.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/leds.h>
+#include <linux/suspend.h>
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+					unsigned long action,
+					void *ignored);
+
+DEFINE_LED_TRIGGER(ledtrig_sleep)
+static struct notifier_block ledtrig_sleep_pm_notifier = {
+	.notifier_call = ledtrig_sleep_pm_callback,
+	.priority = 0,
+};
+
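+/* screen off with the CPU still running: LED on; screen back on: LED off */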
+static void ledtrig_sleep_early_suspend(struct early_suspend *h)
+{
+	led_trigger_event(ledtrig_sleep, LED_FULL);
+}
+
+static void ledtrig_sleep_early_resume(struct early_suspend *h)
+{
+	led_trigger_event(ledtrig_sleep, LED_OFF);
+}
+
+static struct early_suspend ledtrig_sleep_early_suspend_handler = {
+	.suspend = ledtrig_sleep_early_suspend,
+	.resume = ledtrig_sleep_early_resume,
+};
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+					unsigned long action,
+					void *ignored)
+{
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		led_trigger_event(ledtrig_sleep, LED_OFF);
+		return NOTIFY_OK;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		led_trigger_event(ledtrig_sleep, LED_FULL);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int __init ledtrig_sleep_init(void)
+{
+	led_trigger_register_simple("sleep", &ledtrig_sleep);
+	register_pm_notifier(&ledtrig_sleep_pm_notifier);
+	register_early_suspend(&ledtrig_sleep_early_suspend_handler);
+	return 0;
+}
+
+static void __exit ledtrig_sleep_exit(void)
+{
+	unregister_early_suspend(&ledtrig_sleep_early_suspend_handler);
+	unregister_pm_notifier(&ledtrig_sleep_pm_notifier);
+	led_trigger_unregister_simple(ledtrig_sleep);
+}
+
+module_init(ledtrig_sleep_init);
+module_exit(ledtrig_sleep_exit);
+
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4e349cd..b575799 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -61,6 +61,10 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad525x_dpot-spi.
 
+config ANDROID_PMEM
+	bool "Android pmem allocator"
+	default y
+
 config ATMEL_PWM
 	tristate "Atmel AT32/AT91 PWM support"
 	depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
@@ -229,6 +233,13 @@
 	  driver (SCSI/ATA) which supports enclosures
 	  or a SCSI enclosure device (SES) to use these services.
 
+config KERNEL_DEBUGGER_CORE
+	bool "Kernel Debugger Core"
+	default n
+	---help---
+	  Generic kernel debugging command processor used by low level
+	  (interrupt context) platform-specific debuggers.
+
 config SGI_XP
 	tristate "Support communication between SGI SSIs"
 	depends on NET
@@ -392,6 +403,14 @@
 	  This driver provides support for the Honeywell HMC6352 compass,
 	  providing configuration and heading data via sysfs.
 
+config SENSORS_AK8975
+	tristate "AK8975 compass support"
+	default n
+	depends on I2C
+	help
+	  If you say yes here you get support for Asahi Kasei's
+	  orientation sensor AK8975.
+
 config EP93XX_PWM
 	tristate "EP93xx PWM support"
 	depends on ARCH_EP93XX
@@ -435,6 +454,10 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called ti_dac7512.
 
+config UID_STAT
+	bool "UID based statistics tracking exported to /proc/uid_stat"
+	default n
+
 config VMWARE_BALLOON
 	tristate "VMware Balloon Driver"
 	depends on X86
@@ -490,6 +513,29 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called pch_phub.
 
+config WL127X_RFKILL
+	tristate "Bluetooth power control driver for TI wl127x"
+	depends on RFKILL
+	default n
+	---help---
+	 Creates an rfkill entry in sysfs for power control of Bluetooth
+	 TI wl127x chips.
+
+config APANIC
+	bool "Android kernel panic diagnostics driver"
+	default n
+	---help---
+	 Driver which handles kernel panics and attempts to write
+	 critical debugging data to flash.
+
+config APANIC_PLABEL
+	string "Android panic dump flash partition label"
+	depends on APANIC
+	default "kpanic"
+	---help---
+	 If your platform uses a different flash partition label for storing
+	 crashdumps, enter it here.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5f03172..2d43048 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -19,8 +19,10 @@
 obj-$(CONFIG_SENSORS_BH1780)	+= bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)	+= bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
+obj-$(CONFIG_ANDROID_PMEM)	+= pmem.o
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
+obj-$(CONFIG_KERNEL_DEBUGGER_CORE)	+= kernel_debugger.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
 obj-$(CONFIG_SGI_XP)		+= sgi-xp/
 obj-$(CONFIG_SGI_GRU)		+= sgi-gru/
@@ -33,6 +35,7 @@
 obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
 obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o
+obj-$(CONFIG_UID_STAT)		+= uid_stat.o
 obj-$(CONFIG_C2PORT)		+= c2port/
 obj-$(CONFIG_IWMC3200TOP)      += iwmc3200top/
 obj-$(CONFIG_HMC6352)		+= hmc6352.o
@@ -46,3 +49,6 @@
 obj-$(CONFIG_AB8500_PWM)	+= ab8500-pwm.o
 obj-y				+= lis3lv02d/
 obj-y				+= carma/
+obj-$(CONFIG_WL127X_RFKILL)	+= wl127x-rfkill.o
+obj-$(CONFIG_APANIC)		+= apanic.o
+obj-$(CONFIG_SENSORS_AK8975)	+= akm8975.o
diff --git a/drivers/misc/akm8975.c b/drivers/misc/akm8975.c
new file mode 100644
index 0000000..830d289
--- /dev/null
+++ b/drivers/misc/akm8975.c
@@ -0,0 +1,732 @@
+/* drivers/misc/akm8975.c - akm8975 compass driver
+ *
+ * Copyright (C) 2007-2008 HTC Corporation.
+ * Author: Hou-Kun Chen <houkun.chen@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Revised by AKM 2009/04/02
+ * Revised by Motorola 2010/05/27
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/freezer.h>
+#include <linux/akm8975.h>
+#include <linux/earlysuspend.h>
+
+#define AK8975DRV_CALL_DBG 0
+#if AK8975DRV_CALL_DBG
+#define FUNCDBG(msg)	pr_err("%s:%s\n", __func__, msg);
+#else
+#define FUNCDBG(msg)
+#endif
+
+#define AK8975DRV_DATA_DBG 0
+#define MAX_FAILURE_COUNT 10
+
+struct akm8975_data {
+	struct i2c_client *this_client;
+	struct akm8975_platform_data *pdata;
+	struct input_dev *input_dev;
+	struct work_struct work;
+	struct mutex flags_lock;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif
+};
+
+/*
+ * Because misc devices cannot carry a pointer from driver register to
+ * open, we keep this global. This limits the driver to a single instance.
+ */
+struct akm8975_data *akmd_data;
+
+static DECLARE_WAIT_QUEUE_HEAD(open_wq);
+
+static atomic_t open_flag;
+
+static short m_flag;
+static short a_flag;
+static short t_flag;
+static short mv_flag;
+
+static short akmd_delay;
+
+static ssize_t akm8975_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client,
+							     AK8975_REG_CNTL));
+}
+static ssize_t akm8975_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	unsigned long val;
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+	if (val > 0xff)
+		return -EINVAL;
+	i2c_smbus_write_byte_data(client, AK8975_REG_CNTL, val);
+	return count;
+}
+static DEVICE_ATTR(akm_ms1, S_IWUSR | S_IRUGO, akm8975_show, akm8975_store);
+
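+/*
+ * buf[0] holds the target register on entry: the first message writes it,
+ * then the second reads 'length' bytes back into the same buffer.
+ */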
+static int akm8975_i2c_rxdata(struct akm8975_data *akm, char *buf, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = akm->this_client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = buf,
+		},
+		{
+			.addr = akm->this_client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+			.buf = buf,
+		},
+	};
+
+	FUNCDBG("called");
+
+	if (i2c_transfer(akm->this_client->adapter, msgs, 2) < 0) {
+		pr_err("akm8975_i2c_rxdata: transfer error\n");
+		return -EIO;
+	} else
+		return 0;
+}
+
+static int akm8975_i2c_txdata(struct akm8975_data *akm, char *buf, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = akm->this_client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buf,
+		},
+	};
+
+	FUNCDBG("called");
+
+	if (i2c_transfer(akm->this_client->adapter, msgs, 1) < 0) {
+		pr_err("akm8975_i2c_txdata: transfer error\n");
+		return -EIO;
+	} else
+		return 0;
+}
+
+static void akm8975_ecs_report_value(struct akm8975_data *akm, short *rbuf)
+{
+	struct akm8975_data *data = i2c_get_clientdata(akm->this_client);
+
+	FUNCDBG("called");
+
+#if AK8975DRV_DATA_DBG
+	pr_info("akm8975_ecs_report_value: yaw = %d, pitch = %d, roll = %d\n",
+				 rbuf[0], rbuf[1], rbuf[2]);
+	pr_info("tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], rbuf[4], rbuf[5]);
+	pr_info("Acceleration:	 x = %d LSB, y = %d LSB, z = %d LSB\n",
+				 rbuf[6], rbuf[7], rbuf[8]);
+	pr_info("Magnetic:	 x = %d LSB, y = %d LSB, z = %d LSB\n\n",
+				 rbuf[9], rbuf[10], rbuf[11]);
+#endif
+	mutex_lock(&akm->flags_lock);
+	/* Report magnetic sensor information */
+	if (m_flag) {
+		input_report_abs(data->input_dev, ABS_RX, rbuf[0]);
+		input_report_abs(data->input_dev, ABS_RY, rbuf[1]);
+		input_report_abs(data->input_dev, ABS_RZ, rbuf[2]);
+		input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]);
+	}
+
+	/* Report acceleration sensor information */
+	if (a_flag) {
+		input_report_abs(data->input_dev, ABS_X, rbuf[6]);
+		input_report_abs(data->input_dev, ABS_Y, rbuf[7]);
+		input_report_abs(data->input_dev, ABS_Z, rbuf[8]);
+		input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]);
+	}
+
+	/* Report temperature information */
+	if (t_flag)
+		input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]);
+
+	if (mv_flag) {
+		input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]);
+		input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]);
+		input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]);
+	}
+	mutex_unlock(&akm->flags_lock);
+
+	input_sync(data->input_dev);
+}
+
+static void akm8975_ecs_close_done(struct akm8975_data *akm)
+{
+	FUNCDBG("called");
+	mutex_lock(&akm->flags_lock);
+	m_flag = 1;
+	a_flag = 1;
+	t_flag = 1;
+	mv_flag = 1;
+	mutex_unlock(&akm->flags_lock);
+}
+
+static int akm_aot_open(struct inode *inode, struct file *file)
+{
+	int ret = -1;
+
+	FUNCDBG("called");
+	if (atomic_cmpxchg(&open_flag, 0, 1) == 0) {
+		wake_up(&open_wq);
+		ret = 0;
+	}
+
+	ret = nonseekable_open(inode, file);
+	if (ret)
+		return ret;
+
+	file->private_data = akmd_data;
+
+	return ret;
+}
+
+static int akm_aot_release(struct inode *inode, struct file *file)
+{
+	FUNCDBG("called");
+	atomic_set(&open_flag, 0);
+	wake_up(&open_wq);
+	return 0;
+}
+
+static int akm_aot_ioctl(struct inode *inode, struct file *file,
+	      unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *) arg;
+	short flag;
+	struct akm8975_data *akm = file->private_data;
+
+	FUNCDBG("called");
+
+	switch (cmd) {
+	case ECS_IOCTL_APP_SET_MFLAG:
+	case ECS_IOCTL_APP_SET_AFLAG:
+	case ECS_IOCTL_APP_SET_MVFLAG:
+		if (copy_from_user(&flag, argp, sizeof(flag)))
+			return -EFAULT;
+		if (flag < 0 || flag > 1)
+			return -EINVAL;
+		break;
+	case ECS_IOCTL_APP_SET_DELAY:
+		if (copy_from_user(&flag, argp, sizeof(flag)))
+			return -EFAULT;
+		break;
+	default:
+		break;
+	}
+
+	mutex_lock(&akm->flags_lock);
+	switch (cmd) {
+	case ECS_IOCTL_APP_SET_MFLAG:
+		m_flag = flag;
+		break;
+	case ECS_IOCTL_APP_GET_MFLAG:
+		flag = m_flag;
+		break;
+	case ECS_IOCTL_APP_SET_AFLAG:
+		a_flag = flag;
+		break;
+	case ECS_IOCTL_APP_GET_AFLAG:
+		flag = a_flag;
+		break;
+	case ECS_IOCTL_APP_SET_MVFLAG:
+		mv_flag = flag;
+		break;
+	case ECS_IOCTL_APP_GET_MVFLAG:
+		flag = mv_flag;
+		break;
+	case ECS_IOCTL_APP_SET_DELAY:
+		akmd_delay = flag;
+		break;
+	case ECS_IOCTL_APP_GET_DELAY:
+		flag = akmd_delay;
+		break;
+	default:
+		return -ENOTTY;
+	}
+	mutex_unlock(&akm->flags_lock);
+
+	switch (cmd) {
+	case ECS_IOCTL_APP_GET_MFLAG:
+	case ECS_IOCTL_APP_GET_AFLAG:
+	case ECS_IOCTL_APP_GET_MVFLAG:
+	case ECS_IOCTL_APP_GET_DELAY:
+		if (copy_to_user(argp, &flag, sizeof(flag)))
+			return -EFAULT;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int akmd_open(struct inode *inode, struct file *file)
+{
+	int err = 0;
+
+	FUNCDBG("called");
+	err = nonseekable_open(inode, file);
+	if (err)
+		return err;
+
+	file->private_data = akmd_data;
+	return 0;
+}
+
+static int akmd_release(struct inode *inode, struct file *file)
+{
+	struct akm8975_data *akm = file->private_data;
+
+	FUNCDBG("called");
+	akm8975_ecs_close_done(akm);
+	return 0;
+}
+
+static int akmd_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+		      unsigned long arg)
+{
+	void __user *argp = (void __user *) arg;
+
+	char rwbuf[16];
+	int ret = -1;
+	int status;
+	short value[12];
+	short delay;
+	struct akm8975_data *akm = file->private_data;
+
+	FUNCDBG("called");
+
+	switch (cmd) {
+	case ECS_IOCTL_READ:
+	case ECS_IOCTL_WRITE:
+		if (copy_from_user(&rwbuf, argp, sizeof(rwbuf)))
+			return -EFAULT;
+		break;
+
+	case ECS_IOCTL_SET_YPR:
+		if (copy_from_user(&value, argp, sizeof(value)))
+			return -EFAULT;
+		break;
+
+	default:
+		break;
+	}
+
+	switch (cmd) {
+	case ECS_IOCTL_READ:
+		if (rwbuf[0] < 1 || rwbuf[0] > sizeof(rwbuf) - 1)
+			return -EINVAL;
+
+		ret = akm8975_i2c_rxdata(akm, &rwbuf[1], rwbuf[0]);
+		if (ret < 0)
+			return ret;
+		break;
+
+	case ECS_IOCTL_WRITE:
+		if (rwbuf[0] < 2 || rwbuf[0] > sizeof(rwbuf) - 1)
+			return -EINVAL;
+
+		ret = akm8975_i2c_txdata(akm, &rwbuf[1], rwbuf[0]);
+		if (ret < 0)
+			return ret;
+		break;
+	case ECS_IOCTL_SET_YPR:
+		akm8975_ecs_report_value(akm, value);
+		break;
+
+	case ECS_IOCTL_GET_OPEN_STATUS:
+		wait_event_interruptible(open_wq,
+					 (atomic_read(&open_flag) != 0));
+		status = atomic_read(&open_flag);
+		break;
+	case ECS_IOCTL_GET_CLOSE_STATUS:
+		wait_event_interruptible(open_wq,
+					 (atomic_read(&open_flag) == 0));
+		status = atomic_read(&open_flag);
+		break;
+
+	case ECS_IOCTL_GET_DELAY:
+		delay = akmd_delay;
+		break;
+
+	default:
+		FUNCDBG("Unknown cmd\n");
+		return -ENOTTY;
+	}
+
+	switch (cmd) {
+	case ECS_IOCTL_READ:
+		if (copy_to_user(argp, &rwbuf, sizeof(rwbuf)))
+			return -EFAULT;
+		break;
+	case ECS_IOCTL_GET_OPEN_STATUS:
+	case ECS_IOCTL_GET_CLOSE_STATUS:
+		if (copy_to_user(argp, &status, sizeof(status)))
+			return -EFAULT;
+		break;
+	case ECS_IOCTL_GET_DELAY:
+		if (copy_to_user(argp, &delay, sizeof(delay)))
+			return -EFAULT;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* needed to clear the int. pin */
+static void akm_work_func(struct work_struct *work)
+{
+	struct akm8975_data *akm =
+	    container_of(work, struct akm8975_data, work);
+
+	FUNCDBG("called");
+	enable_irq(akm->this_client->irq);
+}
+
+static irqreturn_t akm8975_interrupt(int irq, void *dev_id)
+{
+	struct akm8975_data *akm = dev_id;
+	FUNCDBG("called");
+
+	disable_irq_nosync(akm->this_client->irq);
+	schedule_work(&akm->work);
+	return IRQ_HANDLED;
+}
+
+static int akm8975_power_off(struct akm8975_data *akm)
+{
+#if AK8975DRV_CALL_DBG
+	pr_info("%s\n", __func__);
+#endif
+	if (akm->pdata->power_off)
+		akm->pdata->power_off();
+
+	return 0;
+}
+
+static int akm8975_power_on(struct akm8975_data *akm)
+{
+	int err;
+
+#if AK8975DRV_CALL_DBG
+	pr_info("%s\n", __func__);
+#endif
+	if (akm->pdata->power_on) {
+		err = akm->pdata->power_on();
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+static int akm8975_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+	pr_info("%s\n", __func__);
+#endif
+	/* TO DO: might need more work after power mgmt
+	   is enabled */
+	return akm8975_power_off(akm);
+}
+
+static int akm8975_resume(struct i2c_client *client)
+{
+	struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+	pr_info("%s\n", __func__);
+#endif
+	/* TO DO: might need more work after power mgmt
+	   is enabled */
+	return akm8975_power_on(akm);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void akm8975_early_suspend(struct early_suspend *handler)
+{
+	struct akm8975_data *akm;
+	akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+	pr_info("%s\n", __func__);
+#endif
+	akm8975_suspend(akm->this_client, PMSG_SUSPEND);
+}
+
+static void akm8975_early_resume(struct early_suspend *handler)
+{
+	struct akm8975_data *akm;
+	akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+	pr_info("%s\n", __func__);
+#endif
+	akm8975_resume(akm->this_client);
+}
+#endif
+
+
+static int akm8975_init_client(struct i2c_client *client)
+{
+	struct akm8975_data *data;
+	int ret;
+
+	data = i2c_get_clientdata(client);
+
+	ret = request_irq(client->irq, akm8975_interrupt, IRQF_TRIGGER_RISING,
+				"akm8975", data);
+
+	if (ret < 0) {
+		pr_err("akm8975_init_client: request irq failed\n");
+		goto err;
+	}
+
+	init_waitqueue_head(&open_wq);
+
+	mutex_lock(&data->flags_lock);
+	m_flag = 1;
+	a_flag = 1;
+	t_flag = 1;
+	mv_flag = 1;
+	mutex_unlock(&data->flags_lock);
+
+	return 0;
+err:
+	return ret;
+}
+
+static const struct file_operations akmd_fops = {
+	.owner = THIS_MODULE,
+	.open = akmd_open,
+	.release = akmd_release,
+	.ioctl = akmd_ioctl,
+};
+
+static const struct file_operations akm_aot_fops = {
+	.owner = THIS_MODULE,
+	.open = akm_aot_open,
+	.release = akm_aot_release,
+	.ioctl = akm_aot_ioctl,
+};
+
+static struct miscdevice akm_aot_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "akm8975_aot",
+	.fops = &akm_aot_fops,
+};
+
+static struct miscdevice akmd_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "akm8975_dev",
+	.fops = &akmd_fops,
+};
+
+int akm8975_probe(struct i2c_client *client,
+		  const struct i2c_device_id *devid)
+{
+	struct akm8975_data *akm;
+	int err;
+	FUNCDBG("called");
+
+	if (client->dev.platform_data == NULL) {
+		dev_err(&client->dev, "platform data is NULL. exiting.\n");
+		err = -ENODEV;
+		goto exit_platform_data_null;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "platform data is NULL. exiting.\n");
+		err = -ENODEV;
+		goto exit_check_functionality_failed;
+	}
+
+	akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL);
+	if (!akm) {
+		dev_err(&client->dev,
+			"failed to allocate memory for module data\n");
+		err = -ENOMEM;
+		goto exit_alloc_data_failed;
+	}
+
+	akm->pdata = client->dev.platform_data;
+
+	mutex_init(&akm->flags_lock);
+	INIT_WORK(&akm->work, akm_work_func);
+	i2c_set_clientdata(client, akm);
+
+	err = akm8975_power_on(akm);
+	if (err < 0)
+		goto exit_power_on_failed;
+
+	akm8975_init_client(client);
+	akm->this_client = client;
+	akmd_data = akm;
+
+	akm->input_dev = input_allocate_device();
+	if (!akm->input_dev) {
+		err = -ENOMEM;
+		dev_err(&akm->this_client->dev,
+			"input device allocate failed\n");
+		goto exit_input_dev_alloc_failed;
+	}
+
+	set_bit(EV_ABS, akm->input_dev->evbit);
+
+	/* yaw */
+	input_set_abs_params(akm->input_dev, ABS_RX, 0, 23040, 0, 0);
+	/* pitch */
+	input_set_abs_params(akm->input_dev, ABS_RY, -11520, 11520, 0, 0);
+	/* roll */
+	input_set_abs_params(akm->input_dev, ABS_RZ, -5760, 5760, 0, 0);
+	/* x-axis acceleration */
+	input_set_abs_params(akm->input_dev, ABS_X, -5760, 5760, 0, 0);
+	/* y-axis acceleration */
+	input_set_abs_params(akm->input_dev, ABS_Y, -5760, 5760, 0, 0);
+	/* z-axis acceleration */
+	input_set_abs_params(akm->input_dev, ABS_Z, -5760, 5760, 0, 0);
+	/* temperature */
+	input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0);
+	/* status of magnetic sensor */
+	input_set_abs_params(akm->input_dev, ABS_RUDDER, 0, 3, 0, 0);
+	/* status of acceleration sensor */
+	input_set_abs_params(akm->input_dev, ABS_WHEEL, 0, 3, 0, 0);
+	/* x-axis of raw magnetic vector */
+	input_set_abs_params(akm->input_dev, ABS_HAT0X, -20480, 20479, 0, 0);
+	/* y-axis of raw magnetic vector */
+	input_set_abs_params(akm->input_dev, ABS_HAT0Y, -20480, 20479, 0, 0);
+	/* z-axis of raw magnetic vector */
+	input_set_abs_params(akm->input_dev, ABS_BRAKE, -20480, 20479, 0, 0);
+
+	akm->input_dev->name = "compass";
+
+	err = input_register_device(akm->input_dev);
+	if (err) {
+		pr_err("akm8975_probe: Unable to register input device: %s\n",
+					 akm->input_dev->name);
+		goto exit_input_register_device_failed;
+	}
+
+	err = misc_register(&akmd_device);
+	if (err) {
+		pr_err("akm8975_probe: akmd_device register failed\n");
+		goto exit_misc_device_register_failed;
+	}
+
+	err = misc_register(&akm_aot_device);
+	if (err) {
+		pr_err("akm8975_probe: akm_aot_device register failed\n");
+		goto exit_misc_device_register_failed;
+	}
+
+	err = device_create_file(&client->dev, &dev_attr_akm_ms1);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	akm->early_suspend.suspend = akm8975_early_suspend;
+	akm->early_suspend.resume = akm8975_early_resume;
+	register_early_suspend(&akm->early_suspend);
+#endif
+	return 0;
+
+exit_misc_device_register_failed:
+exit_input_register_device_failed:
+	input_free_device(akm->input_dev);
+exit_input_dev_alloc_failed:
+	akm8975_power_off(akm);
+exit_power_on_failed:
+	kfree(akm);
+exit_alloc_data_failed:
+exit_check_functionality_failed:
+exit_platform_data_null:
+	return err;
+}
+
+static int __devexit akm8975_remove(struct i2c_client *client)
+{
+	struct akm8975_data *akm = i2c_get_clientdata(client);
+	FUNCDBG("called");
+	free_irq(client->irq, akm);
+	input_unregister_device(akm->input_dev);
+	misc_deregister(&akmd_device);
+	misc_deregister(&akm_aot_device);
+	akm8975_power_off(akm);
+	kfree(akm);
+	return 0;
+}
+
+static const struct i2c_device_id akm8975_id[] = {
+	{ "akm8975", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, akm8975_id);
+
+static struct i2c_driver akm8975_driver = {
+	.probe = akm8975_probe,
+	.remove = akm8975_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.resume = akm8975_resume,
+	.suspend = akm8975_suspend,
+#endif
+	.id_table = akm8975_id,
+	.driver = {
+		.name = "akm8975",
+	},
+};
+
+static int __init akm8975_init(void)
+{
+	pr_info("AK8975 compass driver: init\n");
+	FUNCDBG("AK8975 compass driver: init\n");
+	return i2c_add_driver(&akm8975_driver);
+}
+
+static void __exit akm8975_exit(void)
+{
+	FUNCDBG("AK8975 compass driver: exit\n");
+	i2c_del_driver(&akm8975_driver);
+}
+
+module_init(akm8975_init);
+module_exit(akm8975_exit);
+
+MODULE_AUTHOR("Hou-Kun Chen <hk_chen@htc.com>");
+MODULE_DESCRIPTION("AK8975 compass driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/apanic.c b/drivers/misc/apanic.c
new file mode 100644
index 0000000..ca875f8
--- /dev/null
+++ b/drivers/misc/apanic.c
@@ -0,0 +1,606 @@
+/* drivers/misc/apanic.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+
+extern void ram_console_enable_console(int);
+
+struct panic_header {
+	u32 magic;
+#define PANIC_MAGIC 0xdeadf00d
+
+	u32 version;
+#define PHDR_VERSION   0x01
+
+	u32 console_offset;
+	u32 console_length;
+
+	u32 threads_offset;
+	u32 threads_length;
+};
+
+struct apanic_data {
+	struct mtd_info		*mtd;
+	struct panic_header	curr;
+	void			*bounce;
+	struct proc_dir_entry	*apanic_console;
+	struct proc_dir_entry	*apanic_threads;
+};
+
+static struct apanic_data drv_ctx;
+static struct work_struct proc_removal_work;
+static DEFINE_MUTEX(drv_mutex);
+
+static unsigned int *apanic_bbt;
+static unsigned int apanic_erase_blocks;
+static unsigned int apanic_good_blocks;
+
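+/* bad-block table: one bit per erase block, packed into 32-bit words */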
+static void set_bb(unsigned int block, unsigned int *bbt)
+{
+	unsigned int flag = 1;
+
+	BUG_ON(block >= apanic_erase_blocks);
+
+	flag = flag << (block%32);
+	apanic_bbt[block/32] |= flag;
+	apanic_good_blocks--;
+}
+
+static unsigned int get_bb(unsigned int block, unsigned int *bbt)
+{
+	unsigned int flag;
+
+	BUG_ON(block >= apanic_erase_blocks);
+
+	flag = 1 << (block%32);
+	return apanic_bbt[block/32] & flag;
+}
+
+static void alloc_bbt(struct mtd_info *mtd, unsigned int *bbt)
+{
+	int bbt_size;
+	apanic_erase_blocks = (mtd->size)>>(mtd->erasesize_shift);
+	bbt_size = (apanic_erase_blocks+32)/32;
+
+	apanic_bbt = kzalloc(bbt_size*4, GFP_KERNEL);
+	apanic_good_blocks = apanic_erase_blocks;
+}
+static void scan_bbt(struct mtd_info *mtd, unsigned int *bbt)
+{
+	int i;
+
+	for (i = 0; i < apanic_erase_blocks; i++) {
+		if (mtd->block_isbad(mtd, i*mtd->erasesize))
+			set_bb(i, apanic_bbt);
+	}
+}
+
+#define APANIC_INVALID_OFFSET 0xFFFFFFFF
+
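+/* map a logical partition offset to a physical one, skipping blocks marked bad */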
+static unsigned int phy_offset(struct mtd_info *mtd, unsigned int offset)
+{
+	unsigned int logic_block = offset>>(mtd->erasesize_shift);
+	unsigned int phy_block;
+	unsigned good_block = 0;
+
+	for (phy_block = 0; phy_block < apanic_erase_blocks; phy_block++) {
+		if (!get_bb(phy_block, apanic_bbt))
+			good_block++;
+		if (good_block == (logic_block + 1))
+			break;
+	}
+
+	if (good_block != (logic_block + 1))
+		return APANIC_INVALID_OFFSET;
+
+	return offset + ((phy_block-logic_block)<<mtd->erasesize_shift);
+}
+
+static void apanic_erase_callback(struct erase_info *done)
+{
+	wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv;
+	wake_up(wait_q);
+}
+
+static int apanic_proc_read(char *buffer, char **start, off_t offset,
+			       int count, int *peof, void *dat)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	size_t file_length;
+	off_t file_offset;
+	unsigned int page_no;
+	off_t page_offset;
+	int rc;
+	size_t len;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&drv_mutex);
+
+	switch ((int) dat) {
+	case 1:	/* apanic_console */
+		file_length = ctx->curr.console_length;
+		file_offset = ctx->curr.console_offset;
+		break;
+	case 2:	/* apanic_threads */
+		file_length = ctx->curr.threads_length;
+		file_offset = ctx->curr.threads_offset;
+		break;
+	default:
+		pr_err("Bad dat (%d)\n", (int) dat);
+		mutex_unlock(&drv_mutex);
+		return -EINVAL;
+	}
+
+	if ((offset + count) > file_length) {
+		mutex_unlock(&drv_mutex);
+		return 0;
+	}
+
+	/* We only support reading a maximum of a flash page */
+	if (count > ctx->mtd->writesize)
+		count = ctx->mtd->writesize;
+
+	page_no = (file_offset + offset) / ctx->mtd->writesize;
+	page_offset = (file_offset + offset) % ctx->mtd->writesize;
+
+
+	if (phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize))
+		== APANIC_INVALID_OFFSET) {
+		pr_err("apanic: reading an invalid address\n");
+		mutex_unlock(&drv_mutex);
+		return -EINVAL;
+	}
+	rc = ctx->mtd->read(ctx->mtd,
+		phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)),
+		ctx->mtd->writesize,
+		&len, ctx->bounce);
+
+	if (page_offset)
+		count -= page_offset;
+	memcpy(buffer, ctx->bounce + page_offset, count);
+
+	*start = count;
+
+	if ((offset + count) == file_length)
+		*peof = 1;
+
+	mutex_unlock(&drv_mutex);
+	return count;
+}
+
+static void mtd_panic_erase(void)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	struct erase_info erase;
+	DECLARE_WAITQUEUE(wait, current);
+	wait_queue_head_t wait_q;
+	int rc, i;
+
+	init_waitqueue_head(&wait_q);
+	erase.mtd = ctx->mtd;
+	erase.callback = apanic_erase_callback;
+	erase.len = ctx->mtd->erasesize;
+	erase.priv = (u_long)&wait_q;
+	for (i = 0; i < ctx->mtd->size; i += ctx->mtd->erasesize) {
+		erase.addr = i;
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&wait_q, &wait);
+
+		if (get_bb(erase.addr>>ctx->mtd->erasesize_shift, apanic_bbt)) {
+			printk(KERN_WARNING
+			       "apanic: Skipping erase of bad "
+			       "block @%llx\n", erase.addr);
+			set_current_state(TASK_RUNNING);
+			remove_wait_queue(&wait_q, &wait);
+			continue;
+		}
+
+		rc = ctx->mtd->erase(ctx->mtd, &erase);
+		if (rc) {
+			set_current_state(TASK_RUNNING);
+			remove_wait_queue(&wait_q, &wait);
+			printk(KERN_ERR
+			       "apanic: Erase of 0x%llx, 0x%llx failed\n",
+			       (unsigned long long) erase.addr,
+			       (unsigned long long) erase.len);
+			if (rc == -EIO) {
+				if (ctx->mtd->block_markbad(ctx->mtd,
+							    erase.addr)) {
+					printk(KERN_ERR
+					       "apanic: Err marking blk bad\n");
+					goto out;
+				}
+				printk(KERN_INFO
+				       "apanic: Marked a bad block"
+				       " @%llx\n", erase.addr);
+				set_bb(erase.addr>>ctx->mtd->erasesize_shift,
+					apanic_bbt);
+				continue;
+			}
+			goto out;
+		}
+		schedule();
+		remove_wait_queue(&wait_q, &wait);
+	}
+	printk(KERN_DEBUG "apanic: %s partition erased\n",
+	       CONFIG_APANIC_PLABEL);
+out:
+	return;
+}
+
+static void apanic_remove_proc_work(struct work_struct *work)
+{
+	struct apanic_data *ctx = &drv_ctx;
+
+	mutex_lock(&drv_mutex);
+	mtd_panic_erase();
+	memset(&ctx->curr, 0, sizeof(struct panic_header));
+	if (ctx->apanic_console) {
+		remove_proc_entry("apanic_console", NULL);
+		ctx->apanic_console = NULL;
+	}
+	if (ctx->apanic_threads) {
+		remove_proc_entry("apanic_threads", NULL);
+		ctx->apanic_threads = NULL;
+	}
+	mutex_unlock(&drv_mutex);
+}
+
+static int apanic_proc_write(struct file *file, const char __user *buffer,
+				unsigned long count, void *data)
+{
+	schedule_work(&proc_removal_work);
+	return count;
+}
+
+static void mtd_panic_notify_add(struct mtd_info *mtd)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	struct panic_header *hdr = ctx->bounce;
+	size_t len;
+	int rc;
+	int    proc_entry_created = 0;
+
+	if (strcmp(mtd->name, CONFIG_APANIC_PLABEL))
+		return;
+
+	ctx->mtd = mtd;
+
+	alloc_bbt(mtd, apanic_bbt);
+	scan_bbt(mtd, apanic_bbt);
+
+	if (apanic_good_blocks == 0) {
+		printk(KERN_ERR "apanic: no good blocks?!\n");
+		goto out_err;
+	}
+
+	rc = mtd->read(mtd, phy_offset(mtd, 0), mtd->writesize,
+			&len, ctx->bounce);
+	if (rc && rc == -EBADMSG) {
+		printk(KERN_WARNING
+		       "apanic: Bad ECC on block 0 (ignored)\n");
+	} else if (rc && rc != -EUCLEAN) {
+		printk(KERN_ERR "apanic: Error reading block 0 (%d)\n", rc);
+		goto out_err;
+	}
+
+	if (len != mtd->writesize) {
+		printk(KERN_ERR "apanic: Bad read size (%zu)\n", len);
+		goto out_err;
+	}
+
+	printk(KERN_INFO "apanic: Bound to mtd partition '%s'\n", mtd->name);
+
+	if (hdr->magic != PANIC_MAGIC) {
+		printk(KERN_INFO "apanic: No panic data available\n");
+		mtd_panic_erase();
+		return;
+	}
+
+	if (hdr->version != PHDR_VERSION) {
+		printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n",
+		       hdr->version, PHDR_VERSION);
+		mtd_panic_erase();
+		return;
+	}
+
+	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));
+
+	printk(KERN_INFO "apanic: c(%u, %u) t(%u, %u)\n",
+	       hdr->console_offset, hdr->console_length,
+	       hdr->threads_offset, hdr->threads_length);
+
+	if (hdr->console_length) {
+		ctx->apanic_console = create_proc_entry("apanic_console",
+						      S_IFREG | S_IRUGO, NULL);
+		if (!ctx->apanic_console)
+			printk(KERN_ERR "%s: failed creating procfile\n",
+			       __func__);
+		else {
+			ctx->apanic_console->read_proc = apanic_proc_read;
+			ctx->apanic_console->write_proc = apanic_proc_write;
+			ctx->apanic_console->size = hdr->console_length;
+			ctx->apanic_console->data = (void *) 1;
+			proc_entry_created = 1;
+		}
+	}
+
+	if (hdr->threads_length) {
+		ctx->apanic_threads = create_proc_entry("apanic_threads",
+						       S_IFREG | S_IRUGO, NULL);
+		if (!ctx->apanic_threads)
+			printk(KERN_ERR "%s: failed creating procfile\n",
+			       __func__);
+		else {
+			ctx->apanic_threads->read_proc = apanic_proc_read;
+			ctx->apanic_threads->write_proc = apanic_proc_write;
+			ctx->apanic_threads->size = hdr->threads_length;
+			ctx->apanic_threads->data = (void *) 2;
+			proc_entry_created = 1;
+		}
+	}
+
+	if (!proc_entry_created)
+		mtd_panic_erase();
+
+	return;
+out_err:
+	ctx->mtd = NULL;
+}
+
+static void mtd_panic_notify_remove(struct mtd_info *mtd)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	if (mtd == ctx->mtd) {
+		ctx->mtd = NULL;
+		printk(KERN_INFO "apanic: Unbound from %s\n", mtd->name);
+	}
+}
+
+static struct mtd_notifier mtd_panic_notifier = {
+	.add	= mtd_panic_notify_add,
+	.remove	= mtd_panic_notify_remove,
+};
+
+static int in_panic = 0;
+
+static int apanic_writeflashpage(struct mtd_info *mtd, loff_t to,
+				 const u_char *buf)
+{
+	int rc;
+	size_t wlen;
+	int panic = in_interrupt() | in_atomic();
+
+	if (panic && !mtd->panic_write) {
+		printk(KERN_EMERG "%s: No panic_write available\n", __func__);
+		return 0;
+	} else if (!panic && !mtd->write) {
+		printk(KERN_EMERG "%s: No write available\n", __func__);
+		return 0;
+	}
+
+	to = phy_offset(mtd, to);
+	if (to == APANIC_INVALID_OFFSET) {
+		printk(KERN_EMERG "apanic: write to invalid address\n");
+		return 0;
+	}
+
+	if (panic)
+		rc = mtd->panic_write(mtd, to, mtd->writesize, &wlen, buf);
+	else
+		rc = mtd->write(mtd, to, mtd->writesize, &wlen, buf);
+
+	if (rc) {
+		printk(KERN_EMERG
+		       "%s: Error writing data to flash (%d)\n",
+		       __func__, rc);
+		return rc;
+	}
+
+	return wlen;
+}
+
+extern int log_buf_copy(char *dest, int idx, int len);
+extern void log_buf_clear(void);
+
+/*
+ * Writes the contents of the console to the specified offset in flash.
+ * Returns number of bytes written
+ */
+static int apanic_write_console(struct mtd_info *mtd, unsigned int off)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	int saved_oip;
+	int idx = 0;
+	int rc, rc2;
+	unsigned int last_chunk = 0;
+
+	while (!last_chunk) {
+		saved_oip = oops_in_progress;
+		oops_in_progress = 1;
+		rc = log_buf_copy(ctx->bounce, idx, mtd->writesize);
+		if (rc < 0)
+			break;
+
+		if (rc != mtd->writesize)
+			last_chunk = rc;
+
+		oops_in_progress = saved_oip;
+		if (rc <= 0)
+			break;
+		if (rc != mtd->writesize)
+			memset(ctx->bounce + rc, 0, mtd->writesize - rc);
+
+		rc2 = apanic_writeflashpage(mtd, off, ctx->bounce);
+		if (rc2 <= 0) {
+			printk(KERN_EMERG
+			       "apanic: Flash write failed (%d)\n", rc2);
+			return idx;
+		}
+		if (!last_chunk)
+			idx += rc2;
+		else
+			idx += last_chunk;
+		off += rc2;
+	}
+	return idx;
+}
+
+static int apanic(struct notifier_block *this, unsigned long event,
+			void *ptr)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	struct panic_header *hdr = (struct panic_header *) ctx->bounce;
+	int console_offset = 0;
+	int console_len = 0;
+	int threads_offset = 0;
+	int threads_len = 0;
+	int rc;
+
+	if (in_panic)
+		return NOTIFY_DONE;
+	in_panic = 1;
+#ifdef CONFIG_PREEMPT
+	/* Ensure that cond_resched() won't try to preempt anybody */
+	add_preempt_count(PREEMPT_ACTIVE);
+#endif
+	touch_softlockup_watchdog();
+
+	if (!ctx->mtd)
+		goto out;
+
+	if (ctx->curr.magic) {
+		printk(KERN_EMERG "Crash partition in use!\n");
+		goto out;
+	}
+	console_offset = ctx->mtd->writesize;
+
+	/*
+	 * Write out the console
+	 */
+	console_len = apanic_write_console(ctx->mtd, console_offset);
+	if (console_len < 0) {
+		printk(KERN_EMERG "Error writing console to panic log! (%d)\n",
+		       console_len);
+		console_len = 0;
+	}
+
+	/*
+	 * Write out all threads
+	 */
+	threads_offset = ALIGN(console_offset + console_len,
+			       ctx->mtd->writesize);
+	if (!threads_offset)
+		threads_offset = ctx->mtd->writesize;
+
+	ram_console_enable_console(0);
+
+	log_buf_clear();
+	show_state_filter(0);
+	threads_len = apanic_write_console(ctx->mtd, threads_offset);
+	if (threads_len < 0) {
+		printk(KERN_EMERG "Error writing threads to panic log! (%d)\n",
+		       threads_len);
+		threads_len = 0;
+	}
+
+	/*
+	 * Finally write the panic header
+	 */
+	memset(ctx->bounce, 0, PAGE_SIZE);
+	hdr->magic = PANIC_MAGIC;
+	hdr->version = PHDR_VERSION;
+
+	hdr->console_offset = console_offset;
+	hdr->console_length = console_len;
+
+	hdr->threads_offset = threads_offset;
+	hdr->threads_length = threads_len;
+
+	rc = apanic_writeflashpage(ctx->mtd, 0, ctx->bounce);
+	if (rc <= 0) {
+		printk(KERN_EMERG "apanic: Header write failed (%d)\n",
+		       rc);
+		goto out;
+	}
+
+	printk(KERN_EMERG "apanic: Panic dump successfully written to flash\n");
+
+ out:
+#ifdef CONFIG_PREEMPT
+	sub_preempt_count(PREEMPT_ACTIVE);
+#endif
+	in_panic = 0;
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_blk = {
+	.notifier_call	= apanic,
+};
+
+static int panic_dbg_get(void *data, u64 *val)
+{
+	apanic(NULL, 0, NULL);
+	return 0;
+}
+
+static int panic_dbg_set(void *data, u64 val)
+{
+	BUG();
+	return -1;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n");
+
+int __init apanic_init(void)
+{
+	register_mtd_user(&mtd_panic_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+	debugfs_create_file("apanic", 0644, NULL, NULL, &panic_dbg_fops);
+	memset(&drv_ctx, 0, sizeof(drv_ctx));
+	drv_ctx.bounce = (void *) __get_free_page(GFP_KERNEL);
+	INIT_WORK(&proc_removal_work, apanic_remove_proc_work);
+	printk(KERN_INFO "Android kernel panic handler initialized (bind=%s)\n",
+	       CONFIG_APANIC_PLABEL);
+	return 0;
+}
+
+module_init(apanic_init);
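For reference, a minimal userspace sketch (illustrative only, not part of this patch) of how the saved dump might be consumed. The /proc/apanic_console and /proc/apanic_threads names come from the create_proc_entry() calls in mtd_panic_notify_add() above, and any write to either entry reaches apanic_proc_write(), which schedules removal of the proc files and erasure of the panic partition.

	/* illustrative userspace consumer -- assumes the proc names above */
	#include <stdio.h>

	int main(void)
	{
		FILE *in = fopen("/proc/apanic_console", "r");
		char buf[4096];
		size_t n;

		if (!in)
			return 0;	/* no panic data was saved */
		while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
			fwrite(buf, 1, n, stdout);	/* replay the last panic console */
		fclose(in);

		/* any write (typically done by init, as root) clears the dump */
		in = fopen("/proc/apanic_console", "w");
		if (in) {
			fputs("1\n", in);
			fclose(in);
		}
		return 0;
	}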
diff --git a/drivers/misc/kernel_debugger.c b/drivers/misc/kernel_debugger.c
new file mode 100644
index 0000000..4a9fef6
--- /dev/null
+++ b/drivers/misc/kernel_debugger.c
@@ -0,0 +1,89 @@
+/* drivers/misc/kernel_debugger.c
+ *
+ * Guts of the kernel debugger.
+ * Needs something to actually push commands to it.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/kernel_debugger.h>
+
+#define dprintf(fmt...) (ctxt->printf(ctxt->cookie, fmt))
+
+static void do_ps(struct kdbg_ctxt *ctxt)
+{
+	struct task_struct *g, *p;
+	unsigned state;
+	static const char stat_nam[] = "RSDTtZX";
+
+	dprintf("pid   ppid  prio task            pc\n");
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		state = p->state ? __ffs(p->state) + 1 : 0;
+		dprintf("%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+		dprintf("%-13.13s %c", p->comm,
+			state >= sizeof(stat_nam) ? '?' : stat_nam[state]);
+		if (state == TASK_RUNNING)
+			dprintf(" running\n");
+		else
+			dprintf(" %08lx\n", thread_saved_pc(p));
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+}
+
+int log_buf_copy(char *dest, int idx, int len);
+extern int do_syslog(int type, char __user *buf, int count);
+static void do_sysrq(struct kdbg_ctxt *ctxt, char rq)
+{
+	char buf[128];
+	int ret;
+	int idx = 0;
+	do_syslog(5 /* clear */, NULL, 0);
+	handle_sysrq(rq);
+	while (1) {
+		ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
+		if (ret <= 0)
+			break;
+		buf[ret] = 0;
+		dprintf("%s", buf);
+		idx += ret;
+	}
+}
+
+static void do_help(struct kdbg_ctxt *ctxt)
+{
+	dprintf("Kernel Debugger commands:\n");
+	dprintf(" ps            Process list\n");
+	dprintf(" sysrq         sysrq options\n");
+	dprintf(" sysrq <param> Execute sysrq with <param>\n");
+}
+
+int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd)
+{
+	if (!strcmp(cmd, "ps"))
+		do_ps(ctxt);
+	if (!strcmp(cmd, "sysrq"))
+		do_sysrq(ctxt, 'h');
+	if (!strncmp(cmd, "sysrq ", 6))
+		do_sysrq(ctxt, cmd[6]);
+	if (!strcmp(cmd, "help"))
+		do_help(ctxt);
+
+	return 0;
+}
+
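A hypothetical command source (not part of this patch) would feed strings into kernel_debugger(). struct kdbg_ctxt is defined in <linux/kernel_debugger.h>, which is not shown in this hunk, so the printf/cookie field names below are assumptions read off the dprintf() macro above; a sketch:

	/* illustrative only -- kdbg_ctxt field names assumed from dprintf() */
	#include <linux/kernel.h>
	#include <linux/kernel_debugger.h>

	static int demo_printf(void *cookie, const char *fmt, ...)
	{
		/* a real transport (FIQ debugger, serial console, ...) goes here */
		return 0;
	}

	static struct kdbg_ctxt demo_ctxt = {
		.printf = demo_printf,
		.cookie = NULL,
	};

	static void demo_run(void)
	{
		kernel_debugger(&demo_ctxt, "ps");	/* task list via do_ps() */
		kernel_debugger(&demo_ctxt, "sysrq t");	/* 't' forwarded to handle_sysrq() */
	}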
diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c
new file mode 100644
index 0000000..abb73c1
--- /dev/null
+++ b/drivers/misc/pmem.c
@@ -0,0 +1,1345 @@
+/* drivers/misc/pmem.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/android_pmem.h>
+#include <linux/mempolicy.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+#define PMEM_MAX_DEVICES 10
+#define PMEM_MAX_ORDER 128
+#define PMEM_MIN_ALLOC PAGE_SIZE
+
+#define PMEM_DEBUG 1
+
+/* indicates that a reference to this file has been taken via get_pmem_file;
+ * the file should not be released until put_pmem_file is called */
+#define PMEM_FLAGS_BUSY 0x1
+/* indicates that this is a suballocation of a larger master range */
+#define PMEM_FLAGS_CONNECTED (0x1 << 1)
+/* indicates this is a master and not a sub allocation and that it is mmaped */
+#define PMEM_FLAGS_MASTERMAP (0x1 << 2)
+/* submap and unsubmap flags indicate:
+ * 00: subregion has never been mmaped
+ * 10: subregion has been mmaped, reference to the mm was taken
+ * 11: subregion has been released, reference to the mm still held
+ * 01: subregion has been released, reference to the mm has been released
+ */
+#define PMEM_FLAGS_SUBMAP (0x1 << 3)
+#define PMEM_FLAGS_UNSUBMAP (0x1 << 4)
+
+
+struct pmem_data {
+	/* in alloc mode: an index into the bitmap
+	 * in no_alloc mode: the size of the allocation */
+	int index;
+	/* see flags above for descriptions */
+	unsigned int flags;
+	/* protects this data field; if the mm's mmap_sem will be held at the
+	 * same time as this sem, the mm sem must be taken first (as this is
+	 * the order for the vma_open and vma_close ops) */
+	struct rw_semaphore sem;
+	/* info about the mmapping process */
+	struct vm_area_struct *vma;
+	/* task struct of the mapping process */
+	struct task_struct *task;
+	/* process id of the mapping process */
+	pid_t pid;
+	/* file descriptor of the master */
+	int master_fd;
+	/* file struct of the master */
+	struct file *master_file;
+	/* a list of currently available regions if this is a suballocation */
+	struct list_head region_list;
+	/* a linked list of data so we can access them for debugging */
+	struct list_head list;
+#if PMEM_DEBUG
+	int ref;
+#endif
+};
+
+struct pmem_bits {
+	unsigned allocated:1;		/* 1 if allocated, 0 if free */
+	unsigned order:7;		/* size of the region in pmem space */
+};
+
+struct pmem_region_node {
+	struct pmem_region region;
+	struct list_head list;
+};
+
+#define PMEM_DEBUG_MSGS 0
+#if PMEM_DEBUG_MSGS
+#define DLOG(fmt,args...) \
+	do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
+		    ##args); } \
+	while (0)
+#else
+#define DLOG(x...) do {} while (0)
+#endif
+
+struct pmem_info {
+	struct miscdevice dev;
+	/* physical start address of the remapped pmem space */
+	unsigned long base;
+	/* virtual start address of the remapped pmem space */
+	unsigned char __iomem *vbase;
+	/* total size of the pmem space */
+	unsigned long size;
+	/* number of entries in the pmem space */
+	unsigned long num_entries;
+	/* pfn of the garbage page in memory */
+	unsigned long garbage_pfn;
+	/* index of the garbage page in the pmem space */
+	int garbage_index;
+	/* the bitmap for the region indicating which entries are allocated
+	 * and which are free */
+	struct pmem_bits *bitmap;
+	/* indicates the region should not be managed with an allocator */
+	unsigned no_allocator;
+	/* indicates maps of this region should be cached, if a mix of
+	 * cached and uncached is desired, set this and open the device with
+	 * O_SYNC to get an uncached region */
+	unsigned cached;
+	unsigned buffered;
+	/* in no_allocator mode the first mapper gets the whole space and sets
+	 * this flag */
+	unsigned allocated;
+	/* for debugging, creates a list of pmem file structs, the
+	 * data_list_lock should be taken before pmem_data->sem if both are
+	 * needed */
+	struct mutex data_list_lock;
+	struct list_head data_list;
+	/* pmem_sem protects the bitmap array
+	 * a write lock should be held when modifying entries in bitmap
+	 * a read lock should be held when reading data from bits or
+	 * dereferencing a pointer into bitmap
+	 *
+	 * pmem_data->sem protects the pmem data of a particular file
+	 * Many of the functions that require the pmem_data->sem have a non-
+	 * locking version for when the caller is already holding that sem.
+	 *
+	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
+	 * down(pmem_data->sem) => down(bitmap_sem)
+	 */
+	struct rw_semaphore bitmap_sem;
+
+	long (*ioctl)(struct file *, unsigned int, unsigned long);
+	int (*release)(struct inode *, struct file *);
+};
+
+static struct pmem_info pmem[PMEM_MAX_DEVICES];
+static int id_count;
+
+#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
+#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
+#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
+#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
+#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
+#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
+#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
+#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
+	PMEM_LEN(id, index))
+#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
+#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
+	PMEM_LEN(id, index))
+#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
+#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
+	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
+
+static int pmem_release(struct inode *, struct file *);
+static int pmem_mmap(struct file *, struct vm_area_struct *);
+static int pmem_open(struct inode *, struct file *);
+static long pmem_ioctl(struct file *, unsigned int, unsigned long);
+
+struct file_operations pmem_fops = {
+	.release = pmem_release,
+	.mmap = pmem_mmap,
+	.open = pmem_open,
+	.unlocked_ioctl = pmem_ioctl,
+};
+
+static int get_id(struct file *file)
+{
+	return MINOR(file->f_dentry->d_inode->i_rdev);
+}
+
+int is_pmem_file(struct file *file)
+{
+	int id;
+
+	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
+		return 0;
+	id = get_id(file);
+	if (unlikely(id >= PMEM_MAX_DEVICES))
+		return 0;
+	if (unlikely(file->f_dentry->d_inode->i_rdev !=
+	     MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
+		return 0;
+	return 1;
+}
+
+static int has_allocation(struct file *file)
+{
+	struct pmem_data *data;
+	/* check is_pmem_file first if not accessed via pmem_file_ops */
+
+	if (unlikely(!file->private_data))
+		return 0;
+	data = (struct pmem_data *)file->private_data;
+	if (unlikely(data->index < 0))
+		return 0;
+	return 1;
+}
+
+static int is_master_owner(struct file *file)
+{
+	struct file *master_file;
+	struct pmem_data *data;
+	int put_needed, ret = 0;
+
+	if (!is_pmem_file(file) || !has_allocation(file))
+		return 0;
+	data = (struct pmem_data *)file->private_data;
+	if (PMEM_FLAGS_MASTERMAP & data->flags)
+		return 1;
+	master_file = fget_light(data->master_fd, &put_needed);
+	if (master_file && data->master_file == master_file)
+		ret = 1;
+	fput_light(master_file, put_needed);
+	return ret;
+}
+
+static int pmem_free(int id, int index)
+{
+	/* caller should hold the write lock on pmem_sem! */
+	int buddy, curr = index;
+	DLOG("index %d\n", index);
+
+	if (pmem[id].no_allocator) {
+		pmem[id].allocated = 0;
+		return 0;
+	}
+	/* clean up the bitmap, merging any buddies */
+	pmem[id].bitmap[curr].allocated = 0;
+	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
+	 * if the buddy is also free, merge them
+	 * repeat until the buddy is not free or the end of the bitmap is reached
+	 */
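+	/* worked example (illustrative numbers, not from the driver): freeing
+	 * index 4 at order 2 gives buddy 4 ^ (1 << 2) = 0; if index 0 is free
+	 * and also order 2, both orders become 3 and the loop continues from
+	 * min(0, 4) = 0 */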
+	do {
+		buddy = PMEM_BUDDY_INDEX(id, curr);
+		if (PMEM_IS_FREE(id, buddy) &&
+				PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
+			PMEM_ORDER(id, buddy)++;
+			PMEM_ORDER(id, curr)++;
+			curr = min(buddy, curr);
+		} else {
+			break;
+		}
+	} while (curr < pmem[id].num_entries);
+
+	return 0;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data);
+
+static int pmem_release(struct inode *inode, struct file *file)
+{
+	struct pmem_data *data = (struct pmem_data *)file->private_data;
+	struct pmem_region_node *region_node;
+	struct list_head *elt, *elt2;
+	int id = get_id(file), ret = 0;
+
+
+	mutex_lock(&pmem[id].data_list_lock);
+	/* if this file is a master, revoke all the memory in the connected
+	 *  files */
+	if (PMEM_FLAGS_MASTERMAP & data->flags) {
+		struct pmem_data *sub_data;
+		list_for_each(elt, &pmem[id].data_list) {
+			sub_data = list_entry(elt, struct pmem_data, list);
+			down_read(&sub_data->sem);
+			if (PMEM_IS_SUBMAP(sub_data) &&
+			    file == sub_data->master_file) {
+				up_read(&sub_data->sem);
+				pmem_revoke(file, sub_data);
+			}  else
+				up_read(&sub_data->sem);
+		}
+	}
+	list_del(&data->list);
+	mutex_unlock(&pmem[id].data_list_lock);
+
+
+	down_write(&data->sem);
+
+	/* if it's not a connected file and it has an allocation, free it */
+	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
+		down_write(&pmem[id].bitmap_sem);
+		ret = pmem_free(id, data->index);
+		up_write(&pmem[id].bitmap_sem);
+	}
+
+	/* if this file is a submap (mapped, connected file), downref the
+	 * task struct */
+	if (PMEM_FLAGS_SUBMAP & data->flags)
+		if (data->task) {
+			put_task_struct(data->task);
+			data->task = NULL;
+		}
+
+	file->private_data = NULL;
+
+	list_for_each_safe(elt, elt2, &data->region_list) {
+		region_node = list_entry(elt, struct pmem_region_node, list);
+		list_del(elt);
+		kfree(region_node);
+	}
+	BUG_ON(!list_empty(&data->region_list));
+
+	up_write(&data->sem);
+	kfree(data);
+	if (pmem[id].release)
+		ret = pmem[id].release(inode, file);
+
+	return ret;
+}
+
+static int pmem_open(struct inode *inode, struct file *file)
+{
+	struct pmem_data *data;
+	int id = get_id(file);
+	int ret = 0;
+
+	DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
+	/* set up file->private_data to indicate it's unmapped */
+	/* you can only open a pmem device once */
+	if (file->private_data != NULL)
+		return -1;
+	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
+	if (!data) {
+		printk("pmem: unable to allocate memory for pmem metadata.");
+		return -1;
+	}
+	data->flags = 0;
+	data->index = -1;
+	data->task = NULL;
+	data->vma = NULL;
+	data->pid = 0;
+	data->master_file = NULL;
+#if PMEM_DEBUG
+	data->ref = 0;
+#endif
+	INIT_LIST_HEAD(&data->region_list);
+	init_rwsem(&data->sem);
+
+	file->private_data = data;
+	INIT_LIST_HEAD(&data->list);
+
+	mutex_lock(&pmem[id].data_list_lock);
+	list_add(&data->list, &pmem[id].data_list);
+	mutex_unlock(&pmem[id].data_list_lock);
+	return ret;
+}
+
+static unsigned long pmem_order(unsigned long len)
+{
+	int i;
+
+	len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
+	len--;
+	for (i = 0; i < sizeof(len)*8; i++)
+		if (len >> i == 0)
+			break;
+	return i;
+}
+
+static int pmem_allocate(int id, unsigned long len)
+{
+	/* caller should hold the write lock on pmem_sem! */
+	/* return the corresponding pdata[] entry */
+	int curr = 0;
+	int end = pmem[id].num_entries;
+	int best_fit = -1;
+	unsigned long order = pmem_order(len);
+
+	if (pmem[id].no_allocator) {
+		DLOG("no allocator");
+		if ((len > pmem[id].size) || pmem[id].allocated)
+			return -1;
+		pmem[id].allocated = 1;
+		return len;
+	}
+
+	if (order > PMEM_MAX_ORDER)
+		return -1;
+	DLOG("order %lx\n", order);
+
+	/* look through the bitmap:
+	 * 	if you find a free slot of the correct order use it
+	 * 	otherwise, use the best fit (smallest with size > order) slot
+	 */
+	while (curr < end) {
+		if (PMEM_IS_FREE(id, curr)) {
+			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
+				/* set the not free bit and clear others */
+				best_fit = curr;
+				break;
+			}
+			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
+			    (best_fit < 0 ||
+			     PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
+				best_fit = curr;
+		}
+		curr = PMEM_NEXT_INDEX(id, curr);
+	}
+
+	/* if best_fit < 0, there are no suitable slots,
+	 * return an error
+	 */
+	if (best_fit < 0) {
+		printk("pmem: no space left to allocate!\n");
+		return -1;
+	}
+
+	/* now partition the best fit:
+	 * 	split the slot into 2 buddies of order - 1
+	 * 	repeat until the slot is of the correct order
+	 */
+	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
+		int buddy;
+		PMEM_ORDER(id, best_fit) -= 1;
+		buddy = PMEM_BUDDY_INDEX(id, best_fit);
+		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
+	}
+	pmem[id].bitmap[best_fit].allocated = 1;
+	return best_fit;
+}
+
+static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
+{
+	int id = get_id(file);
+#ifdef pgprot_noncached
+	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
+		return pgprot_noncached(vma_prot);
+#endif
+#ifdef pgprot_ext_buffered
+	else if (pmem[id].buffered)
+		return pgprot_ext_buffered(vma_prot);
+#endif
+	return vma_prot;
+}
+
+static unsigned long pmem_start_addr(int id, struct pmem_data *data)
+{
+	if (pmem[id].no_allocator)
+		return PMEM_START_ADDR(id, 0);
+	else
+		return PMEM_START_ADDR(id, data->index);
+
+}
+
+static void *pmem_start_vaddr(int id, struct pmem_data *data)
+{
+	return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
+}
+
+static unsigned long pmem_len(int id, struct pmem_data *data)
+{
+	if (pmem[id].no_allocator)
+		return data->index;
+	else
+		return PMEM_LEN(id, data->index);
+}
+
+static int pmem_map_garbage(int id, struct vm_area_struct *vma,
+			    struct pmem_data *data, unsigned long offset,
+			    unsigned long len)
+{
+	int i, garbage_pages = len >> PAGE_SHIFT;
+
+	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
+	for (i = 0; i < garbage_pages; i++) {
+		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
+		    pmem[id].garbage_pfn))
+			return -EAGAIN;
+	}
+	return 0;
+}
+
+static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
+				struct pmem_data *data, unsigned long offset,
+				unsigned long len)
+{
+	int garbage_pages;
+	DLOG("unmap offset %lx len %lx\n", offset, len);
+
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+
+	garbage_pages = len >> PAGE_SHIFT;
+	zap_page_range(vma, vma->vm_start + offset, len, NULL);
+	pmem_map_garbage(id, vma, data, offset, len);
+	return 0;
+}
+
+static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
+			      struct pmem_data *data, unsigned long offset,
+			      unsigned long len)
+{
+	DLOG("map offset %lx len %lx\n", offset, len);
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
+
+	if (io_remap_pfn_range(vma, vma->vm_start + offset,
+		(pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
+		len, vma->vm_page_prot)) {
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
+			      struct pmem_data *data, unsigned long offset,
+			      unsigned long len)
+{
+	/* hold the mm sem for the vma you are modifying when you call this */
+	BUG_ON(!vma);
+	zap_page_range(vma, vma->vm_start + offset, len, NULL);
+	return pmem_map_pfn_range(id, vma, data, offset, len);
+}
+
+static void pmem_vma_open(struct vm_area_struct *vma)
+{
+	struct file *file = vma->vm_file;
+	struct pmem_data *data = file->private_data;
+	int id = get_id(file);
+	/* this should never be called as we don't support copying pmem
+	 * ranges via fork */
+	BUG_ON(!has_allocation(file));
+	down_write(&data->sem);
+	/* remap the garbage pages, forkers don't get access to the data */
+	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
+	up_write(&data->sem);
+}
+
+static void pmem_vma_close(struct vm_area_struct *vma)
+{
+	struct file *file = vma->vm_file;
+	struct pmem_data *data = file->private_data;
+
+	DLOG("current %u ppid %u file %p count %d\n", current->pid,
+	     current->parent->pid, file, file_count(file));
+	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
+		printk(KERN_WARNING "pmem: something is very wrong, you are "
+		       "closing a vm backing an allocation that doesn't "
+		       "exist!\n");
+		return;
+	}
+	down_write(&data->sem);
+	if (data->vma == vma) {
+		data->vma = NULL;
+		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
+		    (data->flags & PMEM_FLAGS_SUBMAP))
+			data->flags |= PMEM_FLAGS_UNSUBMAP;
+	}
+	/* the kernel is going to free this vma now anyway */
+	up_write(&data->sem);
+}
+
+static struct vm_operations_struct vm_ops = {
+	.open = pmem_vma_open,
+	.close = pmem_vma_close,
+};
+
+static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct pmem_data *data;
+	int index;
+	unsigned long vma_size =  vma->vm_end - vma->vm_start;
+	int ret = 0, id = get_id(file);
+
+	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
+#if PMEM_DEBUG
+		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
+				" and a multiple of the page size.\n");
+#endif
+		return -EINVAL;
+	}
+
+	data = (struct pmem_data *)file->private_data;
+	down_write(&data->sem);
+	/* check this file isn't already mmaped; for submaps, check this file
+	 * has never been mmaped */
+	if ((data->flags & PMEM_FLAGS_SUBMAP) ||
+	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
+#if PMEM_DEBUG
+		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
+		       "this file is already mmaped. %x\n", data->flags);
+#endif
+		ret = -EINVAL;
+		goto error;
+	}
+	/* if file->private_data is unallocated, allocate */
+	if (data && data->index == -1) {
+		down_write(&pmem[id].bitmap_sem);
+		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
+		up_write(&pmem[id].bitmap_sem);
+		data->index = index;
+	}
+	/* either no space was available or an error occurred */
+	if (!has_allocation(file)) {
+		ret = -EINVAL;
+		printk("pmem: could not find allocation for map.\n");
+		goto error;
+	}
+
+	if (pmem_len(id, data) < vma_size) {
+#if PMEM_DEBUG
+		printk(KERN_WARNING "pmem: mmap size [%lu] does not match "
+		       "size of backing region [%lu].\n", vma_size,
+		       pmem_len(id, data));
+#endif
+		ret = -EINVAL;
+		goto error;
+	}
+
+	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
+	vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
+
+	if (data->flags & PMEM_FLAGS_CONNECTED) {
+		struct pmem_region_node *region_node;
+		struct list_head *elt;
+		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
+			printk("pmem: mmap failed in kernel!\n");
+			ret = -EAGAIN;
+			goto error;
+		}
+		list_for_each(elt, &data->region_list) {
+			region_node = list_entry(elt, struct pmem_region_node,
+						 list);
+			DLOG("remapping file: %p %lx %lx\n", file,
+				region_node->region.offset,
+				region_node->region.len);
+			if (pmem_remap_pfn_range(id, vma, data,
+						 region_node->region.offset,
+						 region_node->region.len)) {
+				ret = -EAGAIN;
+				goto error;
+			}
+		}
+		data->flags |= PMEM_FLAGS_SUBMAP;
+		get_task_struct(current->group_leader);
+		data->task = current->group_leader;
+		data->vma = vma;
+#if PMEM_DEBUG
+		data->pid = current->pid;
+#endif
+		DLOG("submapped file %p vma %p pid %u\n", file, vma,
+		     current->pid);
+	} else {
+		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
+			printk(KERN_INFO "pmem: mmap failed in kernel!\n");
+			ret = -EAGAIN;
+			goto error;
+		}
+		data->flags |= PMEM_FLAGS_MASTERMAP;
+		data->pid = current->pid;
+	}
+	vma->vm_ops = &vm_ops;
+error:
+	up_write(&data->sem);
+	return ret;
+}
+
+/* the following are the api for accessing pmem regions by other drivers
+ * from inside the kernel */
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+		   unsigned long *len)
+{
+	struct pmem_data *data;
+	if (!is_pmem_file(file) || !has_allocation(file)) {
+#if PMEM_DEBUG
+		printk(KERN_INFO "pmem: requested pmem data from invalid "
+				  "file.\n");
+#endif
+		return -1;
+	}
+	data = (struct pmem_data *)file->private_data;
+	down_read(&data->sem);
+	if (data->vma) {
+		*start = data->vma->vm_start;
+		*len = data->vma->vm_end - data->vma->vm_start;
+	} else {
+		*start = 0;
+		*len = 0;
+	}
+	up_read(&data->sem);
+	return 0;
+}
+
+int get_pmem_addr(struct file *file, unsigned long *start,
+		  unsigned long *vstart, unsigned long *len)
+{
+	struct pmem_data *data;
+	int id;
+
+	if (!is_pmem_file(file) || !has_allocation(file)) {
+		return -1;
+	}
+
+	data = (struct pmem_data *)file->private_data;
+	if (data->index == -1) {
+#if PMEM_DEBUG
+		printk(KERN_INFO "pmem: requested pmem data from file with no "
+		       "allocation.\n");
+#endif
+		return -1;
+	}
+	id = get_id(file);
+
+	down_read(&data->sem);
+	*start = pmem_start_addr(id, data);
+	*len = pmem_len(id, data);
+	*vstart = (unsigned long)pmem_start_vaddr(id, data);
+	up_read(&data->sem);
+#if PMEM_DEBUG
+	down_write(&data->sem);
+	data->ref++;
+	up_write(&data->sem);
+#endif
+	return 0;
+}
+
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+		  unsigned long *len, struct file **filp)
+{
+	struct file *file;
+
+	file = fget(fd);
+	if (unlikely(file == NULL)) {
+		printk(KERN_INFO "pmem: requested data from file descriptor "
+		       "that doesn't exist.");
+		return -1;
+	}
+
+	if (get_pmem_addr(file, start, vstart, len))
+		goto end;
+
+	if (filp)
+		*filp = file;
+	return 0;
+end:
+	fput(file);
+	return -1;
+}
+
+void put_pmem_file(struct file *file)
+{
+	struct pmem_data *data;
+	int id;
+
+	if (!is_pmem_file(file))
+		return;
+	id = get_id(file);
+	data = (struct pmem_data *)file->private_data;
+#if PMEM_DEBUG
+	down_write(&data->sem);
+	if (data->ref == 0) {
+		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
+		       pmem[id].dev.name, data->pid);
+		BUG();
+	}
+	data->ref--;
+	up_write(&data->sem);
+#endif
+	fput(file);
+}
+
+void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
+{
+	struct pmem_data *data;
+	int id;
+	void *vaddr;
+	struct pmem_region_node *region_node;
+	struct list_head *elt;
+	void *flush_start, *flush_end;
+
+	if (!is_pmem_file(file) || !has_allocation(file)) {
+		return;
+	}
+
+	id = get_id(file);
+	data = (struct pmem_data *)file->private_data;
+	if (!pmem[id].cached || file->f_flags & O_SYNC)
+		return;
+
+	down_read(&data->sem);
+	vaddr = pmem_start_vaddr(id, data);
+	/* if this isn't a submapped file, flush the whole thing */
+	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
+		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
+		goto end;
+	}
+	/* otherwise, flush the region of the file we are drawing */
+	list_for_each(elt, &data->region_list) {
+		region_node = list_entry(elt, struct pmem_region_node, list);
+		if ((offset >= region_node->region.offset) &&
+		    ((offset + len) <= (region_node->region.offset +
+			region_node->region.len))) {
+			flush_start = vaddr + region_node->region.offset;
+			flush_end = flush_start + region_node->region.len;
+			dmac_flush_range(flush_start, flush_end);
+			break;
+		}
+	}
+end:
+	up_read(&data->sem);
+}
+
+static int pmem_connect(unsigned long connect, struct file *file)
+{
+	struct pmem_data *data = (struct pmem_data *)file->private_data;
+	struct pmem_data *src_data;
+	struct file *src_file;
+	int ret = 0, put_needed;
+
+	down_write(&data->sem);
+	/* retrieve the src file and check it is a pmem file with an alloc */
+	src_file = fget_light(connect, &put_needed);
+	DLOG("connect %p to %p\n", file, src_file);
+	if (!src_file) {
+		printk("pmem: src file not found!\n");
+		ret = -EINVAL;
+		goto err_no_file;
+	}
+	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
+		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
+		       "alloc!\n");
+		ret = -EINVAL;
+		goto err_bad_file;
+	}
+	src_data = (struct pmem_data *)src_file->private_data;
+
+	if (has_allocation(file) && (data->index != src_data->index)) {
+		printk("pmem: file is already mapped but doesn't match this"
+		       " src_file!\n");
+		ret = -EINVAL;
+		goto err_bad_file;
+	}
+	data->index = src_data->index;
+	data->flags |= PMEM_FLAGS_CONNECTED;
+	data->master_fd = connect;
+	data->master_file = src_file;
+
+err_bad_file:
+	fput_light(src_file, put_needed);
+err_no_file:
+	up_write(&data->sem);
+	return ret;
+}
+
+static void pmem_unlock_data_and_mm(struct pmem_data *data,
+				    struct mm_struct *mm)
+{
+	up_write(&data->sem);
+	if (mm != NULL) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+}
+
+static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
+				 struct mm_struct **locked_mm)
+{
+	int ret = 0;
+	struct mm_struct *mm = NULL;
+	*locked_mm = NULL;
+lock_mm:
+	down_read(&data->sem);
+	if (PMEM_IS_SUBMAP(data)) {
+		mm = get_task_mm(data->task);
+		if (!mm) {
+#if PMEM_DEBUG
+			printk("pmem: can't remap task is gone!\n");
+#endif
+			up_read(&data->sem);
+			return -1;
+		}
+	}
+	up_read(&data->sem);
+
+	if (mm)
+		down_write(&mm->mmap_sem);
+
+	down_write(&data->sem);
+	/* check that the file didn't get mmaped before we could take the
+	 * data sem; this should be safe b/c you can only submap each file
+	 * once */
+	if (PMEM_IS_SUBMAP(data) && !mm) {
+		/* pmem_unlock_data_and_mm() already drops data->sem */
+		pmem_unlock_data_and_mm(data, mm);
+		goto lock_mm;
+	}
+	/* now check that vma.mm is still there, it could have been
+	 * deleted by vma_close before we could get the data->sem */
+	if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
+		/* might as well release this */
+		if (data->flags & PMEM_FLAGS_SUBMAP) {
+			put_task_struct(data->task);
+			data->task = NULL;
+			/* lower the submap flag to show the mm is gone */
+			data->flags &= ~(PMEM_FLAGS_SUBMAP);
+		}
+		pmem_unlock_data_and_mm(data, mm);
+		return -1;
+	}
+	*locked_mm = mm;
+	return ret;
+}
+
+int pmem_remap(struct pmem_region *region, struct file *file,
+		      unsigned operation)
+{
+	int ret;
+	struct pmem_region_node *region_node;
+	struct mm_struct *mm = NULL;
+	struct list_head *elt, *elt2;
+	int id = get_id(file);
+	struct pmem_data *data = (struct pmem_data *)file->private_data;
+
+	/* pmem region must be aligned on a page boundary */
+	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
+		 !PMEM_IS_PAGE_ALIGNED(region->len))) {
+#if PMEM_DEBUG
+		printk("pmem: request for unaligned pmem suballocation "
+		       "%lx %lx\n", region->offset, region->len);
+#endif
+		return -EINVAL;
+	}
+
+	/* if userspace requests a region of len 0, there's nothing to do */
+	if (region->len == 0)
+		return 0;
+
+	/* lock the mm and data */
+	ret = pmem_lock_data_and_mm(file, data, &mm);
+	if (ret)
+		return 0;
+
+	/* only the owner of the master file can remap the client fds
+	 * that are backed by it */
+	if (!is_master_owner(file)) {
+#if PMEM_DEBUG
+		printk("pmem: remap requested from non-master process\n");
+#endif
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* check that the requested range is within the src allocation */
+	if (unlikely((region->offset > pmem_len(id, data)) ||
+		     (region->len > pmem_len(id, data)) ||
+		     (region->offset + region->len > pmem_len(id, data)))) {
+#if PMEM_DEBUG
+		printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
+#endif
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (operation == PMEM_MAP) {
+		region_node = kmalloc(sizeof(struct pmem_region_node),
+			      GFP_KERNEL);
+		if (!region_node) {
+			ret = -ENOMEM;
+#if PMEM_DEBUG
+			printk(KERN_INFO "No space to allocate metadata!");
+#endif
+			goto err;
+		}
+		region_node->region = *region;
+		list_add(&region_node->list, &data->region_list);
+	} else if (operation == PMEM_UNMAP) {
+		int found = 0;
+		list_for_each_safe(elt, elt2, &data->region_list) {
+			region_node = list_entry(elt, struct pmem_region_node,
+				      list);
+			if (region->len == 0 ||
+			    (region_node->region.offset == region->offset &&
+			    region_node->region.len == region->len)) {
+				list_del(elt);
+				kfree(region_node);
+				found = 1;
+			}
+		}
+		if (!found) {
+#if PMEM_DEBUG
+			printk("pmem: Unmap region does not map any mapped "
+				"region!");
+#endif
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	if (data->vma && PMEM_IS_SUBMAP(data)) {
+		if (operation == PMEM_MAP)
+			ret = pmem_remap_pfn_range(id, data->vma, data,
+						   region->offset, region->len);
+		else if (operation == PMEM_UNMAP)
+			ret = pmem_unmap_pfn_range(id, data->vma, data,
+						   region->offset, region->len);
+	}
+
+err:
+	pmem_unlock_data_and_mm(data, mm);
+	return ret;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data)
+{
+	struct pmem_region_node *region_node;
+	struct list_head *elt, *elt2;
+	struct mm_struct *mm = NULL;
+	int id = get_id(file);
+	int ret = 0;
+
+	data->master_file = NULL;
+	ret = pmem_lock_data_and_mm(file, data, &mm);
+	/* if lock_data_and_mm fails either the task that mapped the fd, or
+	 * the vma that mapped it have already gone away, nothing more
+	 * needs to be done */
+	if (ret)
+		return;
+	/* unmap everything */
+	/* delete the regions and region list; nothing is mapped any more */
+	if (data->vma)
+		list_for_each_safe(elt, elt2, &data->region_list) {
+			region_node = list_entry(elt, struct pmem_region_node,
+						 list);
+			pmem_unmap_pfn_range(id, data->vma, data,
+					     region_node->region.offset,
+					     region_node->region.len);
+			list_del(elt);
+			kfree(region_node);
+	}
+	/* delete the master file */
+	pmem_unlock_data_and_mm(data, mm);
+}
+
+static void pmem_get_size(struct pmem_region *region, struct file *file)
+{
+	struct pmem_data *data = (struct pmem_data *)file->private_data;
+	int id = get_id(file);
+
+	if (!has_allocation(file)) {
+		region->offset = 0;
+		region->len = 0;
+		return;
+	} else {
+		region->offset = pmem_start_addr(id, data);
+		region->len = pmem_len(id, data);
+	}
+	DLOG("offset %lx len %lx\n", region->offset, region->len);
+}
+
+
+static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pmem_data *data;
+	int id = get_id(file);
+
+	switch (cmd) {
+	case PMEM_GET_PHYS:
+		{
+			struct pmem_region region;
+			DLOG("get_phys\n");
+			if (!has_allocation(file)) {
+				region.offset = 0;
+				region.len = 0;
+			} else {
+				data = (struct pmem_data *)file->private_data;
+				region.offset = pmem_start_addr(id, data);
+				region.len = pmem_len(id, data);
+			}
+			printk(KERN_INFO "pmem: request for physical address of pmem region "
+					"from process %d.\n", current->pid);
+			if (copy_to_user((void __user *)arg, &region,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			break;
+		}
+	case PMEM_MAP:
+		{
+			struct pmem_region region;
+			if (copy_from_user(&region, (void __user *)arg,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			data = (struct pmem_data *)file->private_data;
+			return pmem_remap(&region, file, PMEM_MAP);
+		}
+		break;
+	case PMEM_UNMAP:
+		{
+			struct pmem_region region;
+			if (copy_from_user(&region, (void __user *)arg,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			data = (struct pmem_data *)file->private_data;
+			return pmem_remap(&region, file, PMEM_UNMAP);
+			break;
+		}
+	case PMEM_GET_SIZE:
+		{
+			struct pmem_region region;
+			DLOG("get_size\n");
+			pmem_get_size(&region, file);
+			if (copy_to_user((void __user *)arg, &region,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			break;
+		}
+	case PMEM_GET_TOTAL_SIZE:
+		{
+			struct pmem_region region;
+			DLOG("get total size\n");
+			region.offset = 0;
+			get_id(file);
+			region.len = pmem[id].size;
+			if (copy_to_user((void __user *)arg, &region,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			break;
+		}
+	case PMEM_ALLOCATE:
+		{
+			if (has_allocation(file))
+				return -EINVAL;
+			data = (struct pmem_data *)file->private_data;
+			data->index = pmem_allocate(id, arg);
+			break;
+		}
+	case PMEM_CONNECT:
+		DLOG("connect\n");
+		return pmem_connect(arg, file);
+		break;
+	case PMEM_CACHE_FLUSH:
+		{
+			struct pmem_region region;
+			DLOG("flush\n");
+			if (copy_from_user(&region, (void __user *)arg,
+					   sizeof(struct pmem_region)))
+				return -EFAULT;
+			flush_pmem_file(file, region.offset, region.len);
+			break;
+		}
+	default:
+		if (pmem[id].ioctl)
+			return pmem[id].ioctl(file, cmd, arg);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#if PMEM_DEBUG
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	struct list_head *elt, *elt2;
+	struct pmem_data *data;
+	struct pmem_region_node *region_node;
+	int id = (int)file->private_data;
+	const int debug_bufmax = 4096;
+	static char buffer[4096];
+	int n = 0;
+
+	DLOG("debug read\n");
+	n = scnprintf(buffer, debug_bufmax,
+		      "pid #: mapped regions (offset, len) (offset,len)...\n");
+
+	mutex_lock(&pmem[id].data_list_lock);
+	list_for_each(elt, &pmem[id].data_list) {
+		data = list_entry(elt, struct pmem_data, list);
+		down_read(&data->sem);
+		n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
+				data->pid);
+		list_for_each(elt2, &data->region_list) {
+			region_node = list_entry(elt2, struct pmem_region_node,
+				      list);
+			n += scnprintf(buffer + n, debug_bufmax - n,
+					"(%lx,%lx) ",
+					region_node->region.offset,
+					region_node->region.len);
+		}
+		n += scnprintf(buffer + n, debug_bufmax - n, "\n");
+		up_read(&data->sem);
+	}
+	mutex_unlock(&pmem[id].data_list_lock);
+
+	n++;
+	buffer[n] = 0;
+	return simple_read_from_buffer(buf, count, ppos, buffer, n);
+}
+
+static struct file_operations debug_fops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+#endif
+
+#if 0
+static struct miscdevice pmem_dev = {
+	.name = "pmem",
+	.fops = &pmem_fops,
+};
+#endif
+
+int pmem_setup(struct android_pmem_platform_data *pdata,
+	       long (*ioctl)(struct file *, unsigned int, unsigned long),
+	       int (*release)(struct inode *, struct file *))
+{
+	int err = 0;
+	int i, index = 0;
+	int id = id_count;
+	id_count++;
+
+	pmem[id].no_allocator = pdata->no_allocator;
+	pmem[id].cached = pdata->cached;
+	pmem[id].buffered = pdata->buffered;
+	pmem[id].base = pdata->start;
+	pmem[id].size = pdata->size;
+	pmem[id].ioctl = ioctl;
+	pmem[id].release = release;
+	init_rwsem(&pmem[id].bitmap_sem);
+	mutex_init(&pmem[id].data_list_lock);
+	INIT_LIST_HEAD(&pmem[id].data_list);
+	pmem[id].dev.name = pdata->name;
+	pmem[id].dev.minor = id;
+	pmem[id].dev.fops = &pmem_fops;
+	printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
+
+	err = misc_register(&pmem[id].dev);
+	if (err) {
+		printk(KERN_ALERT "Unable to register pmem driver!\n");
+		goto err_cant_register_device;
+	}
+	pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
+
+	pmem[id].bitmap = kmalloc(pmem[id].num_entries *
+				  sizeof(struct pmem_bits), GFP_KERNEL);
+	if (!pmem[id].bitmap)
+		goto err_no_mem_for_metadata;
+
+	memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
+					  pmem[id].num_entries);
+
+	for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
+		if ((pmem[id].num_entries) &  1<<i) {
+			PMEM_ORDER(id, index) = i;
+			index = PMEM_NEXT_INDEX(id, index);
+		}
+	}
+
+	if (pmem[id].cached)
+		pmem[id].vbase = ioremap_cached(pmem[id].base,
+						pmem[id].size);
+#ifdef ioremap_ext_buffered
+	else if (pmem[id].buffered)
+		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+						      pmem[id].size);
+#endif
+	else
+		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+
+	if (pmem[id].vbase == 0)
+		goto error_cant_remap;
+
+	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
+	if (pmem[id].no_allocator)
+		pmem[id].allocated = 0;
+
+#if PMEM_DEBUG
+	debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
+			    &debug_fops);
+#endif
+	return 0;
+error_cant_remap:
+	kfree(pmem[id].bitmap);
+err_no_mem_for_metadata:
+	misc_deregister(&pmem[id].dev);
+err_cant_register_device:
+	return -1;
+}
+
+static int pmem_probe(struct platform_device *pdev)
+{
+	struct android_pmem_platform_data *pdata;
+
+	if (!pdev || !pdev->dev.platform_data) {
+		printk(KERN_ALERT "Unable to probe pmem!\n");
+		return -1;
+	}
+	pdata = pdev->dev.platform_data;
+	return pmem_setup(pdata, NULL, NULL);
+}
+
+
+static int pmem_remove(struct platform_device *pdev)
+{
+	int id = pdev->id;
+	__free_page(pfn_to_page(pmem[id].garbage_pfn));
+	misc_deregister(&pmem[id].dev);
+	return 0;
+}
+
+static struct platform_driver pmem_driver = {
+	.probe = pmem_probe,
+	.remove = pmem_remove,
+	.driver = { .name = "android_pmem" }
+};
+
+
+static int __init pmem_init(void)
+{
+	return platform_driver_register(&pmem_driver);
+}
+
+static void __exit pmem_exit(void)
+{
+	platform_driver_unregister(&pmem_driver);
+}
+
+module_init(pmem_init);
+module_exit(pmem_exit);
+
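An illustrative userspace client (not part of this patch); the /dev/pmem node name and the PMEM_* ioctl numbers are assumptions taken from <linux/android_pmem.h>, which this hunk does not show, while the offset/len layout of struct pmem_region follows from the code above.

	/* illustrative only -- device name and ioctl macros are assumed */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/android_pmem.h>

	int main(void)
	{
		struct pmem_region region;
		size_t len = 1 << 20;
		void *base;
		int fd = open("/dev/pmem", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, PMEM_GET_TOTAL_SIZE, &region) == 0)
			printf("pmem region: %lu bytes\n", region.len);

		/* the first mmap() of the fd allocates the backing buffer */
		base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (base == MAP_FAILED)
			return 1;
		if (ioctl(fd, PMEM_GET_SIZE, &region) == 0)
			printf("allocated %lu bytes (phys 0x%lx)\n",
			       region.len, region.offset);

		munmap(base, len);
		close(fd);	/* pmem_release() frees the allocation */
		return 0;
	}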
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 0000000..2141124
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,156 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+	struct list_head link;
+	uid_t uid;
+	atomic_t tcp_rcv;
+	atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid) {
+	unsigned long flags;
+	struct uid_stat *entry;
+
+	spin_lock_irqsave(&uid_lock, flags);
+	list_for_each_entry(entry, &uid_list, link) {
+		if (entry->uid == uid) {
+			spin_unlock_irqrestore(&uid_lock, flags);
+			return entry;
+		}
+	}
+	spin_unlock_irqrestore(&uid_lock, flags);
+	return NULL;
+}
+
+static int tcp_snd_read_proc(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	int len;
+	unsigned int bytes;
+	char *p = page;
+	struct uid_stat *uid_entry = (struct uid_stat *) data;
+	if (!data)
+		return 0;
+
+	bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN);
+	p += sprintf(p, "%u\n", bytes);
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+static int tcp_rcv_read_proc(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	int len;
+	unsigned int bytes;
+	char *p = page;
+	struct uid_stat *uid_entry = (struct uid_stat *) data;
+	if (!data)
+		return 0;
+
+	bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN);
+	p += sprintf(p, "%u\n", bytes);
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid) {
+	unsigned long flags;
+	char uid_s[32];
+	struct uid_stat *new_uid;
+	struct proc_dir_entry *entry;
+
+	/* Create the uid stat struct and append it to the list. */
+	if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL)
+		return NULL;
+
+	new_uid->uid = uid;
+	/* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+	atomic_set(&new_uid->tcp_rcv, INT_MIN);
+	atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+	spin_lock_irqsave(&uid_lock, flags);
+	list_add_tail(&new_uid->link, &uid_list);
+	spin_unlock_irqrestore(&uid_lock, flags);
+
+	sprintf(uid_s, "%d", uid);
+	entry = proc_mkdir(uid_s, parent);
+
+	/* Keep reference to uid_stat so we know what uid to read stats from. */
+	create_proc_read_entry("tcp_snd", S_IRUGO, entry , tcp_snd_read_proc,
+		(void *) new_uid);
+
+	create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc,
+		(void *) new_uid);
+
+	return new_uid;
+}
+
+int uid_stat_tcp_snd(uid_t uid, int size) {
+	struct uid_stat *entry;
+	activity_stats_update();
+	if ((entry = find_uid_stat(uid)) == NULL &&
+		((entry = create_stat(uid)) == NULL)) {
+			return -1;
+	}
+	atomic_add(size, &entry->tcp_snd);
+	return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size) {
+	struct uid_stat *entry;
+	activity_stats_update();
+	if ((entry = find_uid_stat(uid)) == NULL &&
+		((entry = create_stat(uid)) == NULL)) {
+			return -1;
+	}
+	atomic_add(size, &entry->tcp_rcv);
+	return 0;
+}
+
+static int __init uid_stat_init(void)
+{
+	parent = proc_mkdir("uid_stat", NULL);
+	if (!parent) {
+		pr_err("uid_stat: failed to create proc entry\n");
+		return -1;
+	}
+	return 0;
+}
+
+__initcall(uid_stat_init);
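A hypothetical in-kernel call site (not part of this patch), showing how a network path would be expected to feed these counters; the entry points and the <linux/uid_stat.h> header match the code above, the caller itself is made up.

	/* illustrative only -- the caller and uid are made up */
	#include <linux/uid_stat.h>

	static void demo_account(uid_t uid, int sent, int received)
	{
		uid_stat_tcp_snd(uid, sent);		/* /proc/uid_stat/<uid>/tcp_snd */
		uid_stat_tcp_rcv(uid, received);	/* /proc/uid_stat/<uid>/tcp_rcv */
	}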
diff --git a/drivers/misc/wl127x-rfkill.c b/drivers/misc/wl127x-rfkill.c
new file mode 100644
index 0000000..f5b9515
--- /dev/null
+++ b/drivers/misc/wl127x-rfkill.c
@@ -0,0 +1,121 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/wl127x-rfkill.h>
+
+static int wl127x_rfkill_set_power(void *data, enum rfkill_state state)
+{
+	int nshutdown_gpio = (int) data;
+
+	switch (state) {
+	case RFKILL_STATE_UNBLOCKED:
+		gpio_set_value(nshutdown_gpio, 1);
+		break;
+	case RFKILL_STATE_SOFT_BLOCKED:
+		gpio_set_value(nshutdown_gpio, 0);
+		break;
+	default:
+		printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state);
+	}
+	return 0;
+}
+
+static int wl127x_rfkill_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+	enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED;  /* off */
+
+	rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio");
+	if (unlikely(rc))
+		return rc;
+
+	rc = gpio_direction_output(pdata->nshutdown_gpio, 0);
+	if (unlikely(rc))
+		return rc;
+
+	rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state);
+	wl127x_rfkill_set_power(NULL, default_state);
+
+	pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH);
+	if (unlikely(!pdata->rfkill))
+		return -ENOMEM;
+
+	pdata->rfkill->name = "wl127x";
+	pdata->rfkill->state = default_state;
+	/* userspace cannot take exclusive control */
+	pdata->rfkill->user_claim_unsupported = 1;
+	pdata->rfkill->user_claim = 0;
+	pdata->rfkill->data = (void *) pdata->nshutdown_gpio;
+	pdata->rfkill->toggle_radio = wl127x_rfkill_set_power;
+
+	rc = rfkill_register(pdata->rfkill);
+
+	if (unlikely(rc))
+		rfkill_free(pdata->rfkill);
+
+	return 0;
+}
+
+static int wl127x_rfkill_remove(struct platform_device *pdev)
+{
+	struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+
+	rfkill_unregister(pdata->rfkill);
+	rfkill_free(pdata->rfkill);
+	gpio_free(pdata->nshutdown_gpio);
+
+	return 0;
+}
+
+static struct platform_driver wl127x_rfkill_platform_driver = {
+	.probe = wl127x_rfkill_probe,
+	.remove = wl127x_rfkill_remove,
+	.driver = {
+		   .name = "wl127x-rfkill",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static int __init wl127x_rfkill_init(void)
+{
+	return platform_driver_register(&wl127x_rfkill_platform_driver);
+}
+
+static void __exit wl127x_rfkill_exit(void)
+{
+	platform_driver_unregister(&wl127x_rfkill_platform_driver);
+}
+
+module_init(wl127x_rfkill_init);
+module_exit(wl127x_rfkill_exit);
+
+MODULE_ALIAS("platform:wl127x");
+MODULE_DESCRIPTION("wl127x-rfkill");
+MODULE_AUTHOR("Motorola");
+MODULE_LICENSE("GPL");
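
The driver above binds by platform-device name, so a board file has to register a matching device carrying wl127x_rfkill_platform_data. A hedged sketch of such a registration (the GPIO number and function names are made up for illustration):

#include <linux/platform_device.h>
#include <linux/wl127x-rfkill.h>

#define EXAMPLE_BT_NSHUTDOWN_GPIO	63	/* assumption: board specific */

static struct wl127x_rfkill_platform_data example_wl127x_pdata = {
	.nshutdown_gpio	= EXAMPLE_BT_NSHUTDOWN_GPIO,
};

static struct platform_device example_wl127x_device = {
	.name	= "wl127x-rfkill",	/* must match the driver name above */
	.id	= -1,
	.dev	= {
		.platform_data = &example_wl127x_pdata,
	},
};

static int __init example_board_bt_init(void)
{
	return platform_device_register(&example_wl127x_device);
}
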
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..ebb4afe 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@
 
 	  If unsure, say Y here.
 
+config MMC_BLOCK_DEFERRED_RESUME
+	bool "Defer MMC layer resume until I/O is requested"
+	depends on MMC_BLOCK
+	default n
+	help
+	  Say Y here to enable deferred MMC resume until I/O
+	  is requested. This will reduce overall resume latency and
+	  save power when there's an SD card inserted but not being used.
+
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e422..c779503 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -126,11 +126,7 @@
 
 static inline int mmc_get_devidx(struct gendisk *disk)
 {
-	int devmaj = MAJOR(disk_devt(disk));
-	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
-	if (!devmaj)
-		devidx = disk->first_minor / perdev_minors;
+	int devidx = disk->first_minor / perdev_minors;
 	return devidx;
 }
 
@@ -818,6 +814,8 @@
 				continue;
 			}
 			status = get_card_status(card, req);
+		} else if (disable_multi == 1) {
+			disable_multi = 0;
 		}
 
 		if (brq.sbc.error) {
@@ -939,12 +937,22 @@
 	return 0;
 }
 
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
+
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
 	int ret;
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(card->host)) {
+		mmc_resume_bus(card->host);
+		mmc_blk_set_blksize(md, card);
+	}
+#endif
+
 	mmc_claim_host(card->host);
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
@@ -1038,6 +1046,7 @@
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = parent;
 	set_disk_ro(md->disk, md->read_only || default_ro);
+	md->disk->flags = GENHD_FL_EXT_DEVT;
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -1277,6 +1286,9 @@
 	mmc_set_drvdata(card, md);
 	mmc_fixup_device(card, blk_fixups);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
 	if (mmc_add_disk(md))
 		goto out;
 
@@ -1302,6 +1314,9 @@
 	mmc_release_host(card->host);
 	mmc_blk_remove_req(md);
 	mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
 }
 
 #ifdef CONFIG_PM
@@ -1325,7 +1340,9 @@
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
+#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 		mmc_blk_set_blksize(md, card);
+#endif
 
 		/*
 		 * Resume involves the card going into idle state,
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef10387..85c2e1a 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -27,3 +27,20 @@
 	  support handling this in order for it to be of any use.
 
 	  If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+	boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  If you say Y here, support will be added for embedded SDIO
+	  devices which do not contain the necessary enumeration
+	  support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+	bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  If you say Y here, the MMC layer will be extra paranoid
+	  about re-trying SD init requests. This can be a useful
+	  work-around for buggy controllers and hardware. Enable
+	  if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7843efe..7c3444a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -23,6 +23,7 @@
 #include <linux/log2.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/wakelock.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -39,6 +40,7 @@
 #include "sdio_ops.h"
 
 static struct workqueue_struct *workqueue;
+static struct wake_lock mmc_delayed_work_wake_lock;
 
 /*
  * Enabling software CRCs on the data blocks can be a significant (30%)
@@ -71,6 +73,7 @@
 static int mmc_schedule_delayed_work(struct delayed_work *work,
 				     unsigned long delay)
 {
+	wake_lock(&mmc_delayed_work_wake_lock);
 	return queue_delayed_work(workqueue, work, delay);
 }
 
@@ -556,9 +559,12 @@
 
 	/* If the host is claimed then we do not want to disable it anymore */
 	if (!mmc_try_claim_host(host))
-		return;
+		goto out;
 	mmc_host_do_disable(host, 1);
 	mmc_do_release_host(host);
+
+out:
+	wake_unlock(&mmc_delayed_work_wake_lock);
 }
 
 /**
@@ -1094,6 +1100,36 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+int mmc_resume_bus(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	if (!mmc_bus_needs_resume(host))
+		return -EINVAL;
+
+	printk(KERN_INFO "%s: Starting deferred resume\n", mmc_hostname(host));
+	spin_lock_irqsave(&host->lock, flags);
+	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+	host->rescan_disable = 0;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	mmc_bus_get(host);
+	if (host->bus_ops && !host->bus_dead) {
+		mmc_power_up(host);
+		BUG_ON(!host->bus_ops->resume);
+		host->bus_ops->resume(host);
+	}
+
+	if (host->bus_ops->detect && !host->bus_dead)
+		host->bus_ops->detect(host);
+
+	mmc_bus_put(host);
+	printk(KERN_INFO "%s: Deferred resume completed\n", mmc_hostname(host));
+	return 0;
+}
+
+EXPORT_SYMBOL(mmc_resume_bus);
+
 /*
  * Assign a mmc bus handler to a host. Only one bus handler may control a
  * host at any given time.
@@ -1568,6 +1604,7 @@
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
 	int i;
+	bool extend_wakelock = false;
 
 	if (host->rescan_disable)
 		return;
@@ -1582,6 +1619,12 @@
 	    && !(host->caps & MMC_CAP_NONREMOVABLE))
 		host->bus_ops->detect(host);
 
+	/* If the card was removed, the bus will be marked
+	 * as dead - extend the wakelock so userspace
+	 * can respond. */
+	if (host->bus_dead)
+		extend_wakelock = true;
+
 	/*
 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
 	 * the card is no longer present.
@@ -1606,14 +1649,20 @@
 
 	mmc_claim_host(host);
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+			extend_wakelock = true;
 			break;
+		}
 		if (freqs[i] <= host->f_min)
 			break;
 	}
 	mmc_release_host(host);
 
  out:
+	if (extend_wakelock)
+		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
+	else
+		wake_unlock(&mmc_delayed_work_wake_lock);
 	if (host->caps & MMC_CAP_NEEDS_POLL)
 		mmc_schedule_delayed_work(&host->detect, HZ);
 }
@@ -1751,6 +1800,9 @@
 {
 	int err = 0;
 
+	if (mmc_bus_needs_resume(host))
+		return 0;
+
 	if (host->caps & MMC_CAP_DISABLE)
 		cancel_delayed_work(&host->disable);
 	cancel_delayed_work(&host->detect);
@@ -1793,6 +1845,12 @@
 	int err = 0;
 
 	mmc_bus_get(host);
+	if (mmc_bus_manual_resume(host)) {
+		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+		mmc_bus_put(host);
+		return 0;
+	}
+
 	if (host->bus_ops && !host->bus_dead) {
 		if (!mmc_card_keep_power(host)) {
 			mmc_power_up(host);
@@ -1843,6 +1901,10 @@
 	case PM_SUSPEND_PREPARE:
 
 		spin_lock_irqsave(&host->lock, flags);
+		if (mmc_bus_needs_resume(host)) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			break;
+		}
 		host->rescan_disable = 1;
 		spin_unlock_irqrestore(&host->lock, flags);
 		cancel_delayed_work_sync(&host->detect);
@@ -1865,6 +1927,10 @@
 	case PM_POST_RESTORE:
 
 		spin_lock_irqsave(&host->lock, flags);
+		if (mmc_bus_manual_resume(host)) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			break;
+		}
 		host->rescan_disable = 0;
 		spin_unlock_irqrestore(&host->lock, flags);
 		mmc_detect_change(host, 0);
@@ -1875,6 +1941,22 @@
 }
 #endif
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				struct sdio_cis *cis,
+				struct sdio_cccr *cccr,
+				struct sdio_embedded_func *funcs,
+				int num_funcs)
+{
+	host->embedded_sdio_data.cis = cis;
+	host->embedded_sdio_data.cccr = cccr;
+	host->embedded_sdio_data.funcs = funcs;
+	host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
 static int __init mmc_init(void)
 {
 	int ret;
@@ -1883,6 +1965,9 @@
 	if (!workqueue)
 		return -ENOMEM;
 
+	wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND,
+		       "mmc_delayed_work");
+
 	ret = mmc_register_bus();
 	if (ret)
 		goto destroy_workqueue;
@@ -1903,6 +1988,7 @@
 	mmc_unregister_bus();
 destroy_workqueue:
 	destroy_workqueue(workqueue);
+	wake_lock_destroy(&mmc_delayed_work_wake_lock);
 
 	return ret;
 }
@@ -1913,6 +1999,7 @@
 	mmc_unregister_host_class();
 	mmc_unregister_bus();
 	destroy_workqueue(workqueue);
+	wake_lock_destroy(&mmc_delayed_work_wake_lock);
 }
 
 subsys_initcall(mmc_init);
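
mmc_set_embedded_sdio_data(), added above, lets platform code describe a soldered-down SDIO function whose CIS/CCCR cannot be enumerated, so the SDIO core can skip those reads (see the sdio.c changes below). A hedged sketch of a caller; the class and block size are placeholder values:

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>

/* Hypothetical platform hook: declare one embedded WLAN function on this
 * host. Passing NULL for cis/cccr keeps the normal register reads. */
static struct sdio_embedded_func example_wifi_func = {
	.f_class	= SDIO_CLASS_WLAN,
	.f_maxblksize	= 512,
};

static void example_attach_embedded_wifi(struct mmc_host *host)
{
	mmc_set_embedded_sdio_data(host, NULL, NULL, &example_wifi_func, 1);
}
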
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8..84694a9 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -336,7 +336,8 @@
 #endif
 
 	mmc_start_host(host);
-	register_pm_notifier(&host->pm_notify);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		register_pm_notifier(&host->pm_notify);
 
 	return 0;
 }
@@ -353,7 +354,9 @@
  */
 void mmc_remove_host(struct mmc_host *host)
 {
-	unregister_pm_notifier(&host->pm_notify);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		unregister_pm_notifier(&host->pm_notify);
+
 	mmc_stop_host(host);
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ff27741..5decf49 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,6 +764,9 @@
 	bool reinit)
 {
 	int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	if (!reinit) {
 		/*
@@ -790,7 +793,26 @@
 		/*
 		 * Fetch switch information from card.
 		 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+		for (retries = 1; retries <= 3; retries++) {
+			err = mmc_read_switch(card);
+			if (!err) {
+				if (retries > 1) {
+					printk(KERN_WARNING
+					       "%s: recovered\n", 
+					       mmc_hostname(host));
+				}
+				break;
+			} else {
+				printk(KERN_WARNING
+				       "%s: read switch failed (attempt %d)\n",
+				       mmc_hostname(host), retries);
+			}
+		}
+#else
 		err = mmc_read_switch(card);
+#endif
+
 		if (err)
 			return err;
 	}
@@ -989,18 +1011,36 @@
  */
 static void mmc_sd_detect(struct mmc_host *host)
 {
-	int err;
+	int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries = 5;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
-
+
 	mmc_claim_host(host);
 
 	/*
 	 * Just check if our card has been removed.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	while (retries) {
+		err = mmc_send_status(host->card, NULL);
+		if (err) {
+			retries--;
+			udelay(5);
+			continue;
+		}
+		break;
+	}
+	if (!retries) {
+		printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+		       __func__, mmc_hostname(host), err);
+	}
+#else
 	err = mmc_send_status(host->card, NULL);
-
+#endif
 	mmc_release_host(host);
 
 	if (err) {
@@ -1038,12 +1078,31 @@
 static int mmc_sd_resume(struct mmc_host *host)
 {
 	int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	mmc_claim_host(host);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
+		err = mmc_sd_init_card(host, host->ocr, host->card);
+
+		if (err) {
+			printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+			       mmc_hostname(host), err, retries);
+			mdelay(5);
+			retries--;
+			continue;
+		}
+		break;
+	}
+#else
 	err = mmc_sd_init_card(host, host->ocr, host->card);
+#endif
 	mmc_release_host(host);
 
 	return err;
@@ -1095,6 +1154,9 @@
 {
 	int err;
 	u32 ocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
@@ -1159,9 +1221,27 @@
 	/*
 	 * Detect and init the card.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
+		err = mmc_sd_init_card(host, host->ocr, NULL);
+		if (err) {
+			retries--;
+			continue;
+		}
+		break;
+	}
+
+	if (!retries) {
+		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+		       mmc_hostname(host), err);
+		goto err;
+	}
+#else
 	err = mmc_sd_init_card(host, host->ocr, NULL);
 	if (err)
 		goto err;
+#endif
 
 	mmc_release_host(host);
 	err = mmc_add_card(host->card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 262fff0..7da522e 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -27,6 +27,10 @@
 #include "sdio_ops.h"
 #include "sdio_cis.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
 static int sdio_read_fbr(struct sdio_func *func)
 {
 	int ret;
@@ -449,19 +453,35 @@
 		goto finish;
 	}
 
-	/*
-	 * Read the common registers.
-	 */
-	err = sdio_read_cccr(card);
-	if (err)
-		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cccr)
+		memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+	else {
+#endif
+		/*
+		 * Read the common registers.
+		 */
+		err = sdio_read_cccr(card);
+		if (err)
+			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
-	/*
-	 * Read the common CIS tuples.
-	 */
-	err = sdio_read_common_cis(card);
-	if (err)
-		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cis)
+		memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+	else {
+#endif
+		/*
+		 * Read the common CIS tuples.
+		 */
+		err = sdio_read_common_cis(card);
+		if (err)
+			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
 	if (oldcard) {
 		int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -826,14 +846,36 @@
 	funcs = (ocr & 0x70000000) >> 28;
 	card->sdio_funcs = 0;
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.funcs)
+		card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
 	/*
 	 * Initialize (but don't add) all present functions.
 	 */
 	for (i = 0; i < funcs; i++, card->sdio_funcs++) {
-		err = sdio_init_func(host->card, i + 1);
-		if (err)
-			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		if (host->embedded_sdio_data.funcs) {
+			struct sdio_func *tmp;
 
+			tmp = sdio_alloc_func(host->card);
+			if (IS_ERR(tmp))
+				goto remove;
+			tmp->num = (i + 1);
+			card->sdio_func[i] = tmp;
+			tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+			tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+			tmp->vendor = card->cis.vendor;
+			tmp->device = card->cis.device;
+		} else {
+#endif
+			err = sdio_init_func(host->card, i + 1);
+			if (err)
+				goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		}
+#endif
 		/*
 		 * Enable Runtime PM for this func (if supported)
 		 */
@@ -881,3 +923,77 @@
 	return err;
 }
 
+int sdio_reset_comm(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	u32 ocr;
+	int err;
+
+	printk(KERN_DEBUG "%s():\n", __func__);
+	mmc_claim_host(host);
+
+	mmc_go_idle(host);
+
+	mmc_set_clock(host, host->f_min);
+
+	err = mmc_send_io_op_cond(host, 0, &ocr);
+	if (err)
+		goto err;
+
+	host->ocr = mmc_select_voltage(host, ocr);
+	if (!host->ocr) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+	if (err)
+		goto err;
+
+	if (mmc_host_is_spi(host)) {
+		err = mmc_spi_set_crc(host, use_spi_crc);
+		if (err)
+			goto err;
+	}
+
+	if (!mmc_host_is_spi(host)) {
+		err = mmc_send_relative_addr(host, &card->rca);
+		if (err)
+			goto err;
+		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+	}
+	if (!mmc_host_is_spi(host)) {
+		err = mmc_select_card(card);
+		if (err)
+			goto err;
+	}
+
+	/*
+	 * Switch to high-speed (if supported).
+	 */
+	err = sdio_enable_hs(card);
+	if (err > 0)
+		mmc_sd_go_highspeed(card);
+	else if (err)
+		goto err;
+
+	/*
+	 * Change to the card's maximum speed.
+	 */
+	mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+
+	err = sdio_enable_4bit_bus(card);
+	if (err > 0)
+		mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+	else if (err)
+		goto err;
+
+	mmc_release_host(host);
+	return 0;
+err:
+	printk(KERN_ERR "%s: Error resetting SDIO communications (%d)\n",
+	       mmc_hostname(host), err);
+	mmc_release_host(host);
+	return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
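
sdio_reset_comm() is exported so an SDIO function driver (for example a WLAN driver that power-cycles its chip) can bring the card back to a usable state before reloading firmware. A hedged usage sketch; the surrounding driver and its reset sequence are assumptions:

#include <linux/mmc/sdio_func.h>

/* Hypothetical: after toggling the chip's reset line, re-establish SDIO
 * communication and restore the bus clock and width. */
static int example_wifi_restart(struct sdio_func *func)
{
	int err;

	err = sdio_reset_comm(func->card);
	if (err)
		return err;

	/* firmware reload would follow here */
	return 0;
}
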
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d2565df..52429a9 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -23,6 +23,10 @@
 #include "sdio_cis.h"
 #include "sdio_bus.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
 /* show configuration fields */
 #define sdio_config_attr(field, format_string)				\
 static ssize_t								\
@@ -260,7 +264,14 @@
 {
 	struct sdio_func *func = dev_to_sdio_func(dev);
 
-	sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	/*
+	 * If this device is embedded then we never allocated
+	 * cis tables for this func
+	 */
+	if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+		sdio_free_func_cis(func);
 
 	if (func->info)
 		kfree(func->info);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
old mode 100644
new mode 100755
index 0f687cd..549a341
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -383,6 +383,39 @@
 EXPORT_SYMBOL_GPL(sdio_readb);
 
 /**
+ *	sdio_readb_ext - read a single byte from a SDIO function
+ *	@func: SDIO function to access
+ *	@addr: address to read
+ *	@err_ret: optional status value from transfer
+ *	@in: value to add to argument
+ *
+ *	Reads a single byte from the address space of a given SDIO
+ *	function. If there is a problem reading the address, 0xff
+ *	is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+	int *err_ret, unsigned in)
+{
+	int ret;
+	unsigned char val;
+
+	BUG_ON(!func);
+
+	if (err_ret)
+		*err_ret = 0;
+
+	ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+	if (ret) {
+		if (err_ret)
+			*err_ret = ret;
+		return 0xFF;
+	}
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
+/**
  *	sdio_writeb - write a single byte to a SDIO function
  *	@func: SDIO function to access
  *	@b: byte to write
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 58d5436..7fc5848 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1044,7 +1044,7 @@
 	u16 clk = 0;
 	unsigned long timeout;
 
-	if (clock == host->clock)
+	if (clock && clock == host->clock)
 		return;
 
 	if (host->ops->set_clock) {
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4c34252..43173a3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+	tristate "Include chip ids for known NAND devices"
+	depends on MTD
+	help
+	  Useful for NAND drivers that do not use the NAND subsystem but
+	  would still like to take advantage of the known chip information.
+
 config MTD_NAND_ECC
 	tristate
 
@@ -121,6 +128,23 @@
 	help
           Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
 
+config MTD_NAND_OMAP_PREFETCH
+	bool "GPMC prefetch support for NAND Flash device"
+	depends on MTD_NAND_OMAP2
+	default y
+	help
+	 The NAND device can be accessed for read/write using the GPMC
+	 PREFETCH engine to improve performance.
+
+config MTD_NAND_OMAP_PREFETCH_DMA
+	depends on MTD_NAND_OMAP_PREFETCH
+	bool "DMA mode"
+	default n
+	help
+	 The GPMC PREFETCH engine can be configured either in MPU interrupt
+	 mode or in DMA interrupt mode.
+	 Say Y for DMA mode; otherwise MPU mode will be used.
+
 config MTD_NAND_IDS
 	tristate
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a46e9bb..6516b08 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3217,6 +3217,44 @@
 }
 EXPORT_SYMBOL(nand_scan_ident);
 
+static void nand_panic_wait(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	int i;
+
+	if (chip->state != FL_READY)
+		for (i = 0; i < 40; i++) {
+			if (chip->dev_ready(mtd))
+				break;
+			mdelay(10);
+		}
+	chip->state = FL_READY;
+}
+
+static int nand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, const u_char *buf)
+{
+	struct nand_chip *chip = mtd->priv;
+	int ret;
+
+	/* Do not allow reads past end of device */
+	/* Do not allow writes past end of device */
+		return -EINVAL;
+	if (!len)
+		return 0;
+
+	nand_panic_wait(mtd);
+
+	chip->ops.len = len;
+	chip->ops.datbuf = (uint8_t *)buf;
+	chip->ops.oobbuf = NULL;
+
+	ret = nand_do_write_ops(mtd, to, &chip->ops);
+
+	*retlen = chip->ops.retlen;
+	return ret;
+}
+
 
 /**
  * nand_scan_tail - [NAND Interface] Scan for the NAND device
@@ -3460,6 +3498,7 @@
 	mtd->panic_write = panic_nand_write;
 	mtd->read_oob = nand_read_oob;
 	mtd->write_oob = nand_write_oob;
+	mtd->panic_write = nand_panic_write;
 	mtd->sync = nand_sync;
 	mtd->lock = NULL;
 	mtd->unlock = NULL;
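
nand_panic_write() is wired into mtd->panic_write, the hook that panic-time loggers such as mtdoops use to write the flash without sleeping on the normal chip lock. A hedged sketch of a caller; the offset handling is simplified:

#include <linux/mtd/mtd.h>

/* Hypothetical panic-path dump: push buf through the panic_write hook
 * installed above. Runs in the panic path, so it must not sleep. */
static void example_panic_dump(struct mtd_info *mtd, const u_char *buf,
			       size_t len)
{
	size_t retlen = 0;

	if (mtd->panic_write && !mtd->panic_write(mtd, 0, len, &retlen, buf))
		pr_info("panic dump: wrote %zu bytes\n", retlen);
}
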
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 93359fa..906ef8f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3323,6 +3323,23 @@
 	  used by ISPs and enterprises to tunnel PPP traffic over UDP
 	  tunnels. L2TP is replacing PPTP for VPN uses.
 
+config PPPOLAC
+	tristate "PPP on L2TP Access Concentrator"
+	depends on PPP && INET
+	help
+	  L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles L2TP data packets between a UDP socket
+	  and a PPP channel, but only permits one session per socket. Thus it is
+	  fairly simple and suited for clients.
+
+config PPPOPNS
+	tristate "PPP on PPTP Network Server"
+	depends on PPP && INET
+	help
+	  PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles PPTP data packets between a RAW socket
+	  and a PPP channel. It is fairly simple and easy to use.
+
 config SLIP
 	tristate "SLIP (serial line) support"
 	---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 776a478..13ef4df 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -168,6 +168,8 @@
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 obj-$(CONFIG_PPPOL2TP) += pppox.o
 obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
 
 obj-$(CONFIG_SLIP) += slip.o
 obj-$(CONFIG_SLHC) += slhc.o
diff --git a/drivers/net/pppolac.c b/drivers/net/pppolac.c
new file mode 100644
index 0000000..c94b850
--- /dev/null
+++ b/drivers/net/pppolac.c
@@ -0,0 +1,449 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT	0x80
+#define L2TP_LENGTH_BIT		0x40
+#define L2TP_SEQUENCE_BIT	0x08
+#define L2TP_OFFSET_BIT		0x02
+#define L2TP_VERSION		0x02
+#define L2TP_VERSION_MASK	0x0F
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
+union unaligned {
+	__u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+	return (union unaligned *)ptr;
+}
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+	struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	__u8 bits;
+	__u8 *ptr;
+
+	/* Drop the packet if L2TP header is missing. */
+	if (skb->len < sizeof(struct udphdr) + 6)
+		goto drop;
+
+	/* Put it back if it is a control packet. */
+	if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+		return opt->backlog_rcv(sk_udp, skb);
+
+	/* Skip UDP header. */
+	skb_pull(skb, sizeof(struct udphdr));
+
+	/* Check the version. */
+	if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+		goto drop;
+	bits = skb->data[0];
+	ptr = &skb->data[2];
+
+	/* Check the length if it is present. */
+	if (bits & L2TP_LENGTH_BIT) {
+		if ((ptr[0] << 8 | ptr[1]) != skb->len)
+			goto drop;
+		ptr += 2;
+	}
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+			(bits & L2TP_LENGTH_BIT ? 2 : 0) +
+			(bits & L2TP_OFFSET_BIT ? 2 : 0)))
+		goto drop;
+
+	/* Skip the offset padding if it is present. */
+	if (bits & L2TP_OFFSET_BIT &&
+			!skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+		goto drop;
+
+	/* Check the tunnel and the session. */
+	if (unaligned(ptr)->u32 != opt->local)
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (bits & L2TP_SEQUENCE_BIT) {
+		meta->sequence = ptr[4] << 8 | ptr[5];
+		if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+	if (bits & L2TP_SEQUENCE_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s16 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = (__u16)(meta->sequence + 1);
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+	sock_hold(sk_udp);
+	sk_receive_skb(sk_udp, skb, 0);
+	return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_udp = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = {
+			.msg_iov = (struct iovec *)&iov,
+			.msg_iovlen = 1,
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
+		sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_udp = (struct sock *)chan->private;
+	struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+
+	/* Install L2TP header. */
+	if (atomic_read(&opt->sequencing)) {
+		skb_push(skb, 10);
+		skb->data[0] = L2TP_SEQUENCE_BIT;
+		skb->data[6] = opt->xmit_sequence >> 8;
+		skb->data[7] = opt->xmit_sequence;
+		skb->data[8] = 0;
+		skb->data[9] = 0;
+		opt->xmit_sequence++;
+	} else {
+		skb_push(skb, 6);
+		skb->data[0] = 0;
+	}
+	skb->data[1] = L2TP_VERSION;
+	unaligned(&skb->data[2])->u32 = opt->remote;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_udp);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+	.start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+	struct socket *sock_udp = NULL;
+	struct sock *sk_udp;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppolac) ||
+			!addr->local.tunnel || !addr->local.session ||
+			!addr->remote.tunnel || !addr->remote.session) {
+		return -EINVAL;
+	}
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_udp = sockfd_lookup(addr->udp_socket, &error);
+	if (!sock_udp)
+		goto out;
+	sk_udp = sock_udp->sk;
+	lock_sock(sk_udp);
+
+	/* Remove this check when IPv6 supports UDP encapsulation. */
+	error = -EAFNOSUPPORT;
+	if (sk_udp->sk_family != AF_INET)
+		goto out;
+	error = -EPROTONOSUPPORT;
+	if (sk_udp->sk_protocol != IPPROTO_UDP)
+		goto out;
+	error = -EDESTADDRREQ;
+	if (sk_udp->sk_state != TCP_ESTABLISHED)
+		goto out;
+	error = -EBUSY;
+	if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+		goto out;
+	if (!sk_udp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_udp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	po->chan.hdrlen = 12;
+	po->chan.private = sk_udp;
+	po->chan.ops = &pppolac_channel_ops;
+	po->chan.mtu = PPP_MTU - 80;
+	po->proto.lac.local = unaligned(&addr->local)->u32;
+	po->proto.lac.remote = unaligned(&addr->remote)->u32;
+	atomic_set(&po->proto.lac.sequencing, 1);
+	po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+	udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+	sk_udp->sk_backlog_rcv = pppolac_recv_core;
+	sk_udp->sk_user_data = sk;
+out:
+	if (sock_udp) {
+		release_sock(sk_udp);
+		if (error)
+			sockfd_put(sock_udp);
+	}
+	release_sock(sk);
+	return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_udp);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		udp_sk(sk_udp)->encap_type = 0;
+		udp_sk(sk_udp)->encap_rcv = NULL;
+		sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+		sk_udp->sk_user_data = NULL;
+		release_sock(sk_udp);
+		sockfd_put(sk_udp->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+	.name = "PPPOLAC",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppolac_release,
+	.bind = sock_no_bind,
+	.connect = pppolac_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppolac_proto_ops;
+	sk->sk_protocol = PX_PROTO_OLAC;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+	.create = pppolac_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+	int error;
+
+	error = proto_register(&pppolac_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+	if (error)
+		proto_unregister(&pppolac_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OLAC);
+	proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
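
The receive path above orders packets with serial-number arithmetic: L2TP Ns values are 16 bits wide and wrap around, so the driver compares (__s16)(meta->sequence - opt->recv_sequence) rather than the raw values. A small standalone illustration of why the signed cast handles wrap-around (plain userspace C, not part of the driver):

#include <stdio.h>

/* Returns nonzero when sequence a is older than b modulo 2^16 - the same
 * test the driver uses to drop stale data packets. */
static int seq16_before(unsigned short a, unsigned short b)
{
	return (short)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", seq16_before(0xFFFE, 0x0001));	/* 1: 0xFFFE is older */
	printf("%d\n", seq16_before(0x0001, 0xFFFE));	/* 0: 0x0001 is newer */
	return 0;
}
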
diff --git a/drivers/net/pppopns.c b/drivers/net/pppopns.c
new file mode 100644
index 0000000..fb81984
--- /dev/null
+++ b/drivers/net/pppopns.c
@@ -0,0 +1,428 @@
+/* drivers/net/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address
+ * as the control socket. Outgoing packets are always sent with sequences but
+ * without acknowledgements. Incoming packets with sequences are reordered
+ * within a sliding window of one second. Currently reordering only happens when
+ * a packet is received. It is done for simplicity since no additional locks or
+ * threads are required. This driver should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE		8
+
+#define PPTP_GRE_BITS		htons(0x2001)
+#define PPTP_GRE_BITS_MASK	htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT	htons(0x1000)
+#define PPTP_GRE_ACK_BIT	htons(0x0080)
+#define PPTP_GRE_TYPE		htons(0x880B)
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
+struct header {
+	__u16	bits;
+	__u16	type;
+	__u16	length;
+	__u16	call;
+	__u32	sequence;
+} __attribute__((packed));
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+	struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	struct header *hdr;
+
+	/* Skip transport header */
+	skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+	/* Drop the packet if GRE header is missing. */
+	if (skb->len < GRE_HEADER_SIZE)
+		goto drop;
+	hdr = (struct header *)skb->data;
+
+	/* Check the header. */
+	if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+			(hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+		goto drop;
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, GRE_HEADER_SIZE +
+			(hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+			(hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+		goto drop;
+
+	/* Check the length. */
+	if (skb->len != ntohs(hdr->length))
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		meta->sequence = ntohl(hdr->sequence);
+		if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s32 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = meta->sequence + 1;
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw, int length)
+{
+	struct sk_buff *skb;
+	while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+		sock_hold(sk_raw);
+		sk_receive_skb(sk_raw, skb, 0);
+	}
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_raw = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = {
+			.msg_iov = (struct iovec *)&iov,
+			.msg_iovlen = 1,
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
+		sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_raw = (struct sock *)chan->private;
+	struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+	struct header *hdr;
+	__u16 length;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+	length = skb->len;
+
+	/* Install PPTP GRE header. */
+	hdr = (struct header *)skb_push(skb, 12);
+	hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+	hdr->type = PPTP_GRE_TYPE;
+	hdr->length = htons(length);
+	hdr->call = opt->remote;
+	hdr->sequence = htonl(opt->xmit_sequence);
+	opt->xmit_sequence++;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_raw);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+	.start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+	struct sockaddr_storage ss;
+	struct socket *sock_tcp = NULL;
+	struct socket *sock_raw = NULL;
+	struct sock *sk_tcp;
+	struct sock *sk_raw;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppopns))
+		return -EINVAL;
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+	if (!sock_tcp)
+		goto out;
+	sk_tcp = sock_tcp->sk;
+	error = -EPROTONOSUPPORT;
+	if (sk_tcp->sk_protocol != IPPROTO_TCP)
+		goto out;
+	addrlen = sizeof(struct sockaddr_storage);
+	error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+	if (error)
+		goto out;
+	if (!sk_tcp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_tcp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+	if (error)
+		goto out;
+	sk_raw = sock_raw->sk;
+	sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+	error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+	if (error)
+		goto out;
+
+	po->chan.hdrlen = 14;
+	po->chan.private = sk_raw;
+	po->chan.ops = &pppopns_channel_ops;
+	po->chan.mtu = PPP_MTU - 80;
+	po->proto.pns.local = addr->local;
+	po->proto.pns.remote = addr->remote;
+	po->proto.pns.data_ready = sk_raw->sk_data_ready;
+	po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	lock_sock(sk_raw);
+	sk_raw->sk_data_ready = pppopns_recv;
+	sk_raw->sk_backlog_rcv = pppopns_recv_core;
+	sk_raw->sk_user_data = sk;
+	release_sock(sk_raw);
+out:
+	if (sock_tcp)
+		sockfd_put(sock_tcp);
+	if (error && sock_raw)
+		sock_release(sock_raw);
+	release_sock(sk);
+	return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_raw);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+		sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+		sk_raw->sk_user_data = NULL;
+		release_sock(sk_raw);
+		sock_release(sk_raw->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+	.name = "PPPOPNS",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppopns_release,
+	.bind = sock_no_bind,
+	.connect = pppopns_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppopns_proto_ops;
+	sk->sk_protocol = PX_PROTO_OPNS;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+	.create = pppopns_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+	int error;
+
+	error = proto_register(&pppopns_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+	if (error)
+		proto_unregister(&pppopns_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OPNS);
+	proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f354bd4..f1d88c5 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -268,9 +268,16 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called mwl8k.  If unsure, say N.
 
+config WIFI_CONTROL_FUNC
+	bool "Enable WiFi control function abstraction"
+	help
+	  Enables power/reset/card-detect function abstraction.
+
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
+source "drivers/net/wireless/bcm4329/Kconfig"
+source "drivers/net/wireless/bcmdhd/Kconfig"
 source "drivers/net/wireless/hostap/Kconfig"
 source "drivers/net/wireless/ipw2x00/Kconfig"
 source "drivers/net/wireless/iwlwifi/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7bba6a8..8ceae0a 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -58,3 +58,6 @@
 obj-$(CONFIG_IWM)	+= iwmc3200wifi/
 
 obj-$(CONFIG_MWIFIEX)	+= mwifiex/
+
+obj-$(CONFIG_BCM4329)	+= bcm4329/
+obj-$(CONFIG_BCMDHD)	+= bcmdhd/
diff --git a/drivers/net/wireless/bcm4329/Kconfig b/drivers/net/wireless/bcm4329/Kconfig
new file mode 100644
index 0000000..ca5760d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/Kconfig
@@ -0,0 +1,27 @@
+config BCM4329
+	tristate "Broadcom 4329 wireless cards support"
+	depends on MMC
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	---help---
+	  This module adds support for wireless adapters based on the
+	  Broadcom 4329 chipset.
+
+	  This driver uses the kernel's wireless extensions subsystem.
+
+	  If you choose to build a module, it'll be called bcm4329. Say M
+	  if unsure.
+
+config BCM4329_FW_PATH
+	depends on BCM4329
+	string "Firmware path"
+	default "/system/etc/firmware/fw_bcm4329.bin"
+	---help---
+	  Path to the firmware file.
+
+config BCM4329_NVRAM_PATH
+	depends on BCM4329
+	string "NVRAM path"
+	default "/proc/calibration"
+	---help---
+	  Path to the calibration file.
diff --git a/drivers/net/wireless/bcm4329/Makefile b/drivers/net/wireless/bcm4329/Makefile
new file mode 100644
index 0000000..5a662be
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/Makefile
@@ -0,0 +1,21 @@
+# bcm4329
+DHDCFLAGS = -DLINUX -DBCMDRIVER -DBCMDONGLEHOST -DDHDTHREAD -DBCMWPA2         \
+	-DUNRELEASEDCHIP -Dlinux -DDHD_SDALIGN=64 -DMAX_HDR_READ=64           \
+	-DDHD_FIRSTREAD=64 -DDHD_GPL -DDHD_SCHED -DBDC -DTOE -DDHD_BCMEVENTS  \
+	-DSHOW_EVENTS -DBCMSDIO -DDHD_GPL -DBCMLXSDMMC -DBCMPLATFORM_BUS      \
+	-Wall -Wstrict-prototypes -Werror -DOOB_INTR_ONLY -DCUSTOMER_HW2      \
+	-DDHD_USE_STATIC_BUF -DMMC_SDIO_ABORT -DDHD_DEBUG_TRAP -DSOFTAP       \
+	-DEMBEDDED_PLATFORM -DARP_OFFLOAD_SUPPORT -DPKT_FILTER_SUPPORT        \
+	-DGET_CUSTOM_MAC_ENABLE -DSET_RANDOM_MAC_SOFTAP -DCSCAN -DHW_OOB      \
+	-DKEEP_ALIVE -DPNO_SUPPORT                                            \
+	-Idrivers/net/wireless/bcm4329 -Idrivers/net/wireless/bcm4329/include
+
+DHDOFILES = dhd_linux.o linux_osl.o bcmutils.o dhd_common.o dhd_custom_gpio.o \
+	wl_iw.o siutils.o sbutils.o aiutils.o hndpmu.o bcmwifi.o dhd_sdio.o   \
+	dhd_linux_sched.o dhd_cdc.o bcmsdh_sdmmc.o bcmsdh.o bcmsdh_linux.o    \
+	bcmsdh_sdmmc_linux.o
+
+obj-$(CONFIG_BCM4329) += bcm4329.o
+bcm4329-objs += $(DHDOFILES)
+EXTRA_CFLAGS = $(DHDCFLAGS)
+EXTRA_LDFLAGS += --strip-debug
diff --git a/drivers/net/wireless/bcm4329/aiutils.c b/drivers/net/wireless/bcm4329/aiutils.c
new file mode 100644
index 0000000..df48ac0
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/aiutils.c
@@ -0,0 +1,686 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c,v 1.6.4.7.4.6 2010/04/21 20:43:47 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+STATIC uint32
+get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st,
+	uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh);
+
+
+/* EROM parsing */
+
+static uint32
+get_erom_ent(si_t *sih, uint32 *eromptr, uint32 mask, uint32 match)
+{
+	uint32 ent;
+	uint inv = 0, nom = 0;
+
+	while (TRUE) {
+		ent = R_REG(si_osh(sih), (uint32 *)(uintptr)(*eromptr));
+		*eromptr += sizeof(uint32);
+
+		if (mask == 0)
+			break;
+
+		if ((ent & ER_VALID) == 0) {
+			inv++;
+			continue;
+		}
+
+		if (ent == (ER_END | ER_VALID))
+			break;
+
+		if ((ent & mask) == match)
+			break;
+
+		nom++;
+	}
+
+	SI_MSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+	if (inv + nom)
+		SI_MSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
+	return ent;
+}
+
+STATIC uint32
+get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st,
+	uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh)
+{
+	uint32 asd, sz, szd;
+
+	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+	if (((asd & ER_TAG1) != ER_ADD) ||
+	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+	    ((asd & AD_ST_MASK) != st)) {
+		/* This is not what we want, "push" it back */
+		*eromptr -= sizeof(uint32);
+		return 0;
+	}
+	*addrl = asd & AD_ADDR_MASK;
+	if (asd & AD_AG32)
+		*addrh = get_erom_ent(sih, eromptr, 0, 0);
+	else
+		*addrh = 0;
+	*sizeh = 0;
+	sz = asd & AD_SZ_MASK;
+	if (sz == AD_SZ_SZD) {
+		szd = get_erom_ent(sih, eromptr, 0, 0);
+		*sizel = szd & SD_SZ_MASK;
+		if (szd & SD_SG32)
+			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
+	} else
+		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+	SI_MSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+	return asd;
+}
+
+/* parse the enumeration rom to identify all cores */
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc = (chipcregs_t *)regs;
+	uint32 erombase, eromptr, eromlim;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		eromptr = (uintptr)REG_MAP(erombase, SI_CORE_SIZE);
+		break;
+
+	case PCI_BUS:
+		/* Set wrappers address */
+		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+		/* Now point the window at the erom */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		eromptr = (uint32)(uintptr)regs;
+		break;
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		eromptr = erombase;
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
+		ASSERT(0);
+		return;
+	}
+	eromlim = eromptr + ER_REMAPCONTROL;
+
+	SI_MSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%08x, eromlim = 0x%08x\n",
+	        regs, erombase, eromptr, eromlim));
+	while (eromptr < eromlim) {
+		uint32 cia, cib, base, cid, mfg, crev, nmw, nsw, nmp, nsp;
+		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+		uint i, j, idx;
+		bool br;
+
+		br = FALSE;
+
+		/* Grok a component */
+		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+		if (cia == (ER_END | ER_VALID)) {
+			SI_MSG(("Found END of erom after %d cores\n", sii->numcores));
+			return;
+		}
+		base = eromptr - sizeof(uint32);
+		cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+		if ((cib & ER_TAG) != ER_CI) {
+			SI_ERROR(("CIA not followed by CIB\n"));
+			goto error;
+		}
+
+		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+		SI_MSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%08x, with nmw = %d, "
+		        "nsw = %d, nmp = %d & nsp = %d\n",
+		        mfg, cid, crev, base, nmw, nsw, nmp, nsp));
+
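+		/* Skip the default ARM component and any component with no slave ports */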
+		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+			continue;
+		if ((nmw + nsw == 0)) {
+			/* A component which is not a core */
+			if (cid == OOB_ROUTER_CORE_ID) {
+				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+					&addrl, &addrh, &sizel, &sizeh);
+				if (asd != 0) {
+					sii->common_info->oob_router = addrl;
+				}
+			}
+			continue;
+		}
+
+		idx = sii->numcores;
+/*		sii->eromptr[idx] = base; */
+		sii->common_info->cia[idx] = cia;
+		sii->common_info->cib[idx] = cib;
+		sii->common_info->coreid[idx] = cid;
+
+		for (i = 0; i < nmp; i++) {
+			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+			if ((mpd & ER_TAG) != ER_MP) {
+				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+				goto error;
+			}
+			SI_MSG(("  Master port %d, mp: %d id: %d\n", i,
+			        (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+			        (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+		}
+
+		/* First Slave Address Descriptor should be port 0:
+		 * the main register space for the core
+		 */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+		if (asd == 0) {
+			/* Try again to see if it is a bridge */
+			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd != 0)
+				br = TRUE;
+			else if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("First Slave ASD for core 0x%04x malformed "
+				          "(0x%08x)\n", cid, asd));
+				goto error;
+			}
+		}
+		sii->common_info->coresba[idx] = addrl;
+		sii->common_info->coresba_size[idx] = sizel;
+		/* Get any more ASDs in port 0 */
+		j = 1;
+		do {
+			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+				sii->common_info->coresba2[idx] = addrl;
+				sii->common_info->coresba2_size[idx] = sizel;
+			}
+			j++;
+		} while (asd != 0);
+
+		/* Go through the ASDs for other slave ports */
+		for (i = 1; i < nsp; i++) {
+			j = 0;
+			do {
+				asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, &addrl, &addrh,
+				              &sizel, &sizeh);
+			} while (asd != 0);
+			if (j == 0) {
+				SI_ERROR((" SP %d has no address descriptors\n", i));
+				goto error;
+			}
+		}
+
+		/* Now get master wrappers */
+		for (i = 0; i < nmw; i++) {
+			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for MW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if (i == 0)
+				sii->common_info->wrapba[idx] = addrl;
+		}
+
+		/* And finally slave wrappers */
+		for (i = 0; i < nsw; i++) {
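+			/* With more than one slave port, the slave wrappers start at port 1 */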
+			uint fwp = (nsp == 1) ? 0 : 1;
+			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for SW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if ((nmw == 0) && (i == 0))
+				sii->common_info->wrapba[idx] = addrl;
+		}
+
+		/* Don't record bridges */
+		if (br)
+			continue;
+
+		/* Done with core */
+		sii->numcores++;
+	}
+
+	SI_ERROR(("Reached end of erom without finding END\n"));
+
+error:
+	sii->numcores = 0;
+	return;
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 addr, wrap;
+	void *regs;
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	addr = sii->common_info->coresba[coreidx];
+	wrap = sii->common_info->wrapba[coreidx];
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!sii->common_info->regs[coreidx]) {
+			sii->common_info->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+		}
+		sii->curmap = regs = sii->common_info->regs[coreidx];
+		if (!sii->common_info->wrappers[coreidx]) {
+			sii->common_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->common_info->wrappers[coreidx]));
+		}
+		sii->curwrap = sii->common_info->wrappers[coreidx];
+		break;
+
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		sii->curmap = regs = (void *)((uintptr)addr);
+		sii->curwrap = (void *)((uintptr)wrap);
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	sii->curmap = regs;
+	sii->curidx = coreidx;
+
+	return regs;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(si_t *sih)
+{
+	return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+	uint cidx;
+
+	sii = SI_INFO(sih);
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return sii->common_info->coresba[cidx];
+	else if (asidx == 1)
+		return sii->common_info->coresba2[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+	uint cidx;
+
+	sii = SI_INFO(sih);
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return sii->common_info->coresba_size[cidx];
+	else if (asidx == 1)
+		return sii->common_info->coresba2_size[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+uint
+ai_flag(si_t *sih)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+	ai = sii->curwrap;
+
+	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+void
+ai_write_wrap_reg(si_t *sih, uint32 offset, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai = sii->curwrap;
+	W_REG(sii->osh, (uint32 *)((uint8 *)ai+offset), val);
+	return;
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	uint32 cia;
+
+	sii = SI_INFO(sih);
+	cia = sii->common_info->cia[sii->curidx];
+	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	uint32 cib;
+
+	sii = SI_INFO(sih);
+	cib = sii->common_info->cib[sii->curidx];
+	return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+	ai = sii->curwrap;
+
+	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32-bit register mask-and-set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switching is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!sii->common_info->wrappers[coreidx]) {
+			sii->common_info->regs[coreidx] =
+			    REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_REG(sii->osh, r) & ~mask) | val;
+		W_REG(sii->osh, r, w);
+	}
+
+	/* readback */
+	w = R_REG(sii->osh, r);
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			ai_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* if core is already in reset, just return */
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+		return;
+
+	W_REG(sii->osh, &ai->ioctrl, bits);
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	OSL_DELAY(10);
+
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	ai_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	W_REG(sii->osh, &ai->resetctrl, 0);
+	OSL_DELAY(1);
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	OSL_DELAY(1);
+}
+
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+
+	return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+		W_REG(sii->osh, &ai->iostatus, w);
+	}
+
+	return R_REG(sii->osh, &ai->iostatus);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmpcispi.c b/drivers/net/wireless/bcm4329/bcmpcispi.c
new file mode 100644
index 0000000..1a8b671
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmpcispi.c
@@ -0,0 +1,630 @@
+/*
+ * Broadcom SPI over PCI-SPI Host Controller, low-level hardware driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.c,v 1.22.2.4.4.5.6.1 2010/08/13 00:26:05 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#include <sdio.h>		/* SDIO Specs */
+#include <bcmsdbus.h>		/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>		/* to get msglevel bit values */
+
+#include <pcicfg.h>
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+#include <bcmpcispi.h>		/* BRCM PCI-SPI Host Controller Register definitions */
+
+
+/* ndis_osl.h needs to do a runtime check of the osh to map
+ * R_REG/W_REG to bus specific access similar to linux_osl.h.
+ * Until then...
+ */
+/* linux */
+
+#define SPIPCI_RREG R_REG
+#define SPIPCI_WREG W_REG
+
+
+#define	SPIPCI_ANDREG(osh, r, v) SPIPCI_WREG(osh, (r), (SPIPCI_RREG(osh, r) & (v)))
+#define	SPIPCI_ORREG(osh, r, v)	SPIPCI_WREG(osh, (r), (SPIPCI_RREG(osh, r) | (v)))
+
+
+int bcmpcispi_dump = 0;		/* Set to dump complete trace of all SPI bus transactions */
+
+typedef struct spih_info_ {
+	uint		bar0;		/* BAR0 of PCI Card */
+	uint		bar1;		/* BAR1 of PCI Card */
+	osl_t 		*osh;		/* osh handle */
+	spih_pciregs_t	*pciregs;	/* PCI Core Registers */
+	spih_regs_t	*regs;		/* SPI Controller Registers */
+	uint8		rev;		/* PCI Card Revision ID */
+} spih_info_t;
+
+
+/* Attach to PCI-SPI Host Controller Hardware */
+bool
+spi_hw_attach(sdioh_info_t *sd)
+{
+	osl_t *osh;
+	spih_info_t *si;
+
+	sd_trace(("%s: enter\n", __FUNCTION__));
+
+	osh = sd->osh;
+
+	if ((si = (spih_info_t *)MALLOC(osh, sizeof(spih_info_t))) == NULL) {
+		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		return FALSE;
+	}
+
+	bzero(si, sizeof(spih_info_t));
+
+	sd->controller = si;
+
+	si->osh = sd->osh;
+	si->rev = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_REV, 4) & 0xFF;
+
+	if (si->rev < 3) {
+		sd_err(("Host controller %d not supported, please upgrade to rev >= 3\n", si->rev));
+		MFREE(osh, si, sizeof(spih_info_t));
+		return (FALSE);
+	}
+
+	sd_err(("Attaching to Generic PCI SPI Host Controller Rev %d\n", si->rev));
+
+	/* FPGA Revision < 3 not supported by driver anymore. */
+	ASSERT(si->rev >= 3);
+
+	si->bar0 = sd->bar0;
+
+	/* Rev < 10 PciSpiHost has 2 BARs:
+	 *    BAR0 = PCI Core Registers
+	 *    BAR1 = PciSpiHost Registers (all other cores on backplane)
+	 *
+	 * Rev 10 and up use a different PCI core which only has a single
+	 * BAR0 which contains the PciSpiHost Registers.
+	 */
+	if (si->rev < 10) {
+		si->pciregs = (spih_pciregs_t *)spi_reg_map(osh,
+		                                              (uintptr)si->bar0,
+		                                              sizeof(spih_pciregs_t));
+		sd_err(("Mapped PCI Core regs to BAR0 at %p\n", si->pciregs));
+
+		si->bar1 = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR1, 4);
+		si->regs = (spih_regs_t *)spi_reg_map(osh,
+		                                        (uintptr)si->bar1,
+		                                        sizeof(spih_regs_t));
+		sd_err(("Mapped SPI Controller regs to BAR1 at %p\n", si->regs));
+	} else {
+		si->regs = (spih_regs_t *)spi_reg_map(osh,
+		                                              (uintptr)si->bar0,
+		                                              sizeof(spih_regs_t));
+		sd_err(("Mapped SPI Controller regs to BAR0 at %p\n", si->regs));
+		si->pciregs = NULL;
+	}
+	/* Enable SPI Controller, 16.67MHz SPI Clock */
+	SPIPCI_WREG(osh, &si->regs->spih_ctrl, 0x000000d1);
+
+	/* Set extended feature register to defaults */
+	SPIPCI_WREG(osh, &si->regs->spih_ext, 0x00000000);
+
+	/* Set GPIO CS# High (de-asserted) */
+	SPIPCI_WREG(osh, &si->regs->spih_gpio_data, SPIH_CS);
+
+	/* set GPIO[0] to output for CS# */
+	/* set GPIO[1] to output for power control */
+	/* set GPIO[2] to input for card detect */
+	SPIPCI_WREG(osh, &si->regs->spih_gpio_ctrl, (SPIH_CS | SPIH_SLOT_POWER));
+
+	/* Clear out the Read FIFO in case there is any stuff left in there from a previous run. */
+	while ((SPIPCI_RREG(osh, &si->regs->spih_stat) & SPIH_RFEMPTY) == 0) {
+		SPIPCI_RREG(osh, &si->regs->spih_data);
+	}
+
+	/* Wait for power to stabilize to the SDIO Card (100msec was insufficient) */
+	OSL_DELAY(250000);
+
+	/* Check card detect on FPGA Revision >= 4 */
+	if (si->rev >= 4) {
+		if (SPIPCI_RREG(osh, &si->regs->spih_gpio_data) & SPIH_CARD_DETECT) {
+			sd_err(("%s: no card detected in SD slot\n", __FUNCTION__));
+			spi_reg_unmap(osh, (uintptr)si->regs, sizeof(spih_regs_t));
+			if (si->pciregs) {
+				spi_reg_unmap(osh, (uintptr)si->pciregs, sizeof(spih_pciregs_t));
+			}
+			MFREE(osh, si, sizeof(spih_info_t));
+			return FALSE;
+		}
+	}
+
+	/* Interrupts are level sensitive */
+	SPIPCI_WREG(osh, &si->regs->spih_int_edge, 0x80000000);
+
+	/* Interrupts are active low. */
+	SPIPCI_WREG(osh, &si->regs->spih_int_pol, 0x40000004);
+
+	/* Enable interrupts through PCI Core. */
+	if (si->pciregs) {
+		SPIPCI_WREG(osh, &si->pciregs->ICR, PCI_INT_PROP_EN);
+	}
+
+	sd_trace(("%s: exit\n", __FUNCTION__));
+	return TRUE;
+}
+
+/* Detach and return PCI-SPI Hardware to unconfigured state */
+bool
+spi_hw_detach(sdioh_info_t *sd)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+	spih_pciregs_t *pciregs = si->pciregs;
+
+	sd_trace(("%s: enter\n", __FUNCTION__));
+
+	SPIPCI_WREG(osh, &regs->spih_ctrl, 0x00000010);
+	SPIPCI_WREG(osh, &regs->spih_gpio_ctrl, 0x00000000);	/* Disable GPIO for CS# */
+	SPIPCI_WREG(osh, &regs->spih_int_mask, 0x00000000);	/* Clear Intmask */
+	SPIPCI_WREG(osh, &regs->spih_hex_disp, 0x0000DEAF);
+	SPIPCI_WREG(osh, &regs->spih_int_edge, 0x00000000);
+	SPIPCI_WREG(osh, &regs->spih_int_pol, 0x00000000);
+	SPIPCI_WREG(osh, &regs->spih_hex_disp, 0x0000DEAD);
+
+	/* Disable interrupts through PCI Core. */
+	if (si->pciregs) {
+		SPIPCI_WREG(osh, &pciregs->ICR, 0x00000000);
+		spi_reg_unmap(osh, (uintptr)pciregs, sizeof(spih_pciregs_t));
+	}
+	spi_reg_unmap(osh, (uintptr)regs, sizeof(spih_regs_t));
+
+	MFREE(osh, si, sizeof(spih_info_t));
+
+	sd->controller = NULL;
+
+	sd_trace(("%s: exit\n", __FUNCTION__));
+	return TRUE;
+}
+
+/* Switch between internal (PCI) and external clock oscillator */
+static bool
+sdspi_switch_clock(sdioh_info_t *sd, bool ext_clk)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+
+	/* Switch to desired clock, and reset the PLL. */
+	SPIPCI_WREG(osh, &regs->spih_pll_ctrl, ext_clk ? SPIH_EXT_CLK : 0);
+
+	SPINWAIT(((SPIPCI_RREG(osh, &regs->spih_pll_status) & SPIH_PLL_LOCKED)
+	          != SPIH_PLL_LOCKED), 1000);
+	if ((SPIPCI_RREG(osh, &regs->spih_pll_status) & SPIH_PLL_LOCKED) != SPIH_PLL_LOCKED) {
+		sd_err(("%s: timeout waiting for PLL to lock\n", __FUNCTION__));
+		return (FALSE);
+	}
+	return (TRUE);
+
+}
+
+/* Configure PCI-SPI Host Controller's SPI Clock rate as a divisor into the
+ * base clock rate.  The base clock is either the PCI Clock (33MHz) or the
+ * external clock oscillator at U17 on the PciSpiHost.
+ */
+bool
+spi_start_clock(sdioh_info_t *sd, uint16 div)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+	uint32 t, espr, disp;
+	uint32 disp_xtal_freq;
+	bool	ext_clock = FALSE;
+	char disp_string[5];
+
+	if (div > 2048) {
+		sd_err(("%s: divisor %d too large; using max of 2048\n", __FUNCTION__, div));
+		div = 2048;
+	} else if (div & (div - 1)) {	/* Not a power of 2? */
+		/* Round up to a power of 2 */
+		while ((div + 1) & div)
+			div |= div >> 1;
+		div++;
+	}
+
+	/* For FPGA Rev >= 5, the use of an external clock oscillator is supported.
+	 * If the oscillator is populated, use it to provide the SPI base clock,
+	 * otherwise, default to the PCI clock as the SPI base clock.
+	 */
+	if (si->rev >= 5) {
+		uint32 clk_tick;
+		/* Enable the External Clock Oscillator as PLL clock source. */
+		if (!sdspi_switch_clock(sd, TRUE)) {
+			sd_err(("%s: error switching to external clock\n", __FUNCTION__));
+		}
+
+		/* Check to make sure the external clock is running.  If not, then it
+		 * is not populated on the card, so we will default to the PCI clock.
+		 */
+		clk_tick = SPIPCI_RREG(osh, &regs->spih_clk_count);
+		if (clk_tick == SPIPCI_RREG(osh, &regs->spih_clk_count)) {
+
+			/* Switch back to the PCI clock as the clock source. */
+			if (!sdspi_switch_clock(sd, FALSE)) {
+				sd_err(("%s: error switching to PCI clock\n", __FUNCTION__));
+			}
+		} else {
+			ext_clock = TRUE;
+		}
+	}
+
+	/* Hack to allow hot-swapping oscillators:
+	 * 1. Force PCI clock as clock source, using sd_divisor of 0.
+	 * 2. Swap oscillator
+	 * 3. Set desired sd_divisor (will switch to the external oscillator as the clock source).
+	 */
+	if (div == 0) {
+		ext_clock = FALSE;
+		div = 2;
+
+		/* Select PCI clock as the clock source. */
+		if (!sdspi_switch_clock(sd, FALSE)) {
+		if (!sdspi_switch_clock(sd, FALSE)) {
+			sd_err(("%s: error switching to PCI clock\n", __FUNCTION__));
+		}
+
+		sd_err(("%s: Ok to hot-swap oscillators.\n", __FUNCTION__));
+	}
+
+	/* If using the external oscillator, read the clock frequency from the controller.
+	 * The value read is in units of 10000Hz and is calculated by the FPGA, so it is
+	 * not a nice round number; round it off for display.
+	 */
+	if (ext_clock == TRUE) {
+		uint32 xtal_freq;
+
+		OSL_DELAY(1000);
+		xtal_freq = SPIPCI_RREG(osh, &regs->spih_xtal_freq) * 10000;
+
+		sd_info(("%s: Oscillator is %dHz\n", __FUNCTION__, xtal_freq));
+
+
+		disp_xtal_freq = xtal_freq / 10000;
+
+		/* Round it off to a nice number. */
+		if ((disp_xtal_freq % 100) > 50) {
+			disp_xtal_freq += 100;
+		}
+
+		disp_xtal_freq = (disp_xtal_freq / 100) * 100;
+	} else {
+		sd_err(("%s: no external oscillator installed, using PCI clock.\n", __FUNCTION__));
+		disp_xtal_freq = 3333;
+	}
+
+	/* Convert the SPI Clock frequency to BCD format. */
+	sprintf(disp_string, "%04d", disp_xtal_freq / div);
+
+	disp  = (disp_string[0] - '0') << 12;
+	disp |= (disp_string[1] - '0') << 8;
+	disp |= (disp_string[2] - '0') << 4;
+	disp |= (disp_string[3] - '0');
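+	/* e.g. a displayed rate of 1666 (PCI clock, divisor 2) packs to 0x1666 */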
+
+	/* Select the correct ESPR register value based on the divisor. */
+	switch (div) {
+		case 1:		espr = 0x0; break;
+		case 2:		espr = 0x1; break;
+		case 4:		espr = 0x2; break;
+		case 8:		espr = 0x5; break;
+		case 16:	espr = 0x3; break;
+		case 32:	espr = 0x4; break;
+		case 64:	espr = 0x6; break;
+		case 128:	espr = 0x7; break;
+		case 256:	espr = 0x8; break;
+		case 512:	espr = 0x9; break;
+		case 1024:	espr = 0xa; break;
+		case 2048:	espr = 0xb; break;
+		default:	espr = 0x0; ASSERT(0); break;
+	}
+
+	t = SPIPCI_RREG(osh, &regs->spih_ctrl);
+	t &= ~3;
+	t |= espr & 3;
+	SPIPCI_WREG(osh, &regs->spih_ctrl, t);
+
+	t = SPIPCI_RREG(osh, &regs->spih_ext);
+	t &= ~3;
+	t |= (espr >> 2) & 3;
+	SPIPCI_WREG(osh, &regs->spih_ext, t);
+
+	SPIPCI_WREG(osh, &regs->spih_hex_disp, disp);
+
+	/* For Rev 8, writing to the PLL_CTRL register resets
+	 * the PLL, and it can re-acquire in 200uS.  For
+	 * Rev 7 and older, we use a software delay to allow
+	 * the PLL to re-acquire, which takes more than 2mS.
+	 */
+	if (si->rev < 8) {
+		/* Wait for clock to settle. */
+		OSL_DELAY(5000);
+	}
+
+	sd_info(("%s: SPI_CTRL=0x%08x SPI_EXT=0x%08x\n",
+	         __FUNCTION__,
+	         SPIPCI_RREG(osh, &regs->spih_ctrl),
+	         SPIPCI_RREG(osh, &regs->spih_ext)));
+
+	return TRUE;
+}
+
+/* Configure PCI-SPI Host Controller High-Speed Clocking mode setting */
+bool
+spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+
+	if (si->rev >= 10) {
+		if (hsmode) {
+			SPIPCI_ORREG(osh, &regs->spih_ext, 0x10);
+		} else {
+			SPIPCI_ANDREG(osh, &regs->spih_ext, ~0x10);
+		}
+	}
+
+	return TRUE;
+}
+
+/* Disable device interrupt */
+void
+spi_devintr_off(sdioh_info_t *sd)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	if (sd->use_client_ints) {
+		sd->intmask &= ~SPIH_DEV_INTR;
+		SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);	/* Clear Intmask */
+	}
+}
+
+/* Enable device interrupt */
+void
+spi_devintr_on(sdioh_info_t *sd)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+
+	ASSERT(sd->lockcount == 0);
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	if (sd->use_client_ints) {
+		if (SPIPCI_RREG(osh, &regs->spih_ctrl) & 0x02) {
+			/* Ack in case one was pending but is no longer... */
+			SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_DEV_INTR);
+		}
+		sd->intmask |= SPIH_DEV_INTR;
+		/* Set device intr in Intmask */
+		SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
+	}
+}
+
+/* Check to see if an interrupt belongs to the PCI-SPI Host or a SPI Device */
+bool
+spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+	bool ours = FALSE;
+
+	uint32 raw_int, cur_int;
+	ASSERT(sd);
+
+	if (is_dev_intr)
+		*is_dev_intr = FALSE;
+	raw_int = SPIPCI_RREG(osh, &regs->spih_int_status);
+	cur_int = raw_int & sd->intmask;
+	if (cur_int & SPIH_DEV_INTR) {
+		if (sd->client_intr_enabled && sd->use_client_ints) {
+			sd->intrcount++;
+			ASSERT(sd->intr_handler);
+			ASSERT(sd->intr_handler_arg);
+			(sd->intr_handler)(sd->intr_handler_arg);
+			if (is_dev_intr)
+				*is_dev_intr = TRUE;
+		} else {
+			sd_trace(("%s: Not ready for intr: enabled %d, handler 0x%p\n",
+			        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+		}
+		SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_DEV_INTR);
+		SPIPCI_RREG(osh, &regs->spih_int_status);
+		ours = TRUE;
+	} else if (cur_int & SPIH_CTLR_INTR) {
+		/* Interrupt is from SPI FIFO... just clear and ack it... */
+		sd_trace(("%s: SPI CTLR interrupt: raw_int 0x%08x cur_int 0x%08x\n",
+		          __FUNCTION__, raw_int, cur_int));
+
+		/* Clear the interrupt in the SPI_STAT register */
+		SPIPCI_WREG(osh, &regs->spih_stat, 0x00000080);
+
+		/* Ack the interrupt in the interrupt controller */
+		SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_CTLR_INTR);
+		SPIPCI_RREG(osh, &regs->spih_int_status);
+
+		ours = TRUE;
+	} else if (cur_int & SPIH_WFIFO_INTR) {
+		sd_trace(("%s: SPI WR FIFO Empty interrupt: raw_int 0x%08x cur_int 0x%08x\n",
+		          __FUNCTION__, raw_int, cur_int));
+
+		/* Disable the FIFO Empty Interrupt */
+		sd->intmask &= ~SPIH_WFIFO_INTR;
+		SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
+
+		sd->local_intrcount++;
+		sd->got_hcint = TRUE;
+		ours = TRUE;
+	} else {
+		/* Not an error: can share interrupts... */
+		sd_trace(("%s: Not my interrupt: raw_int 0x%08x cur_int 0x%08x\n",
+		          __FUNCTION__, raw_int, cur_int));
+		ours = FALSE;
+	}
+
+	return ours;
+}
+
+static void
+hexdump(char *pfx, unsigned char *msg, int msglen)
+{
+	int i, col;
+	char buf[80];
+
+	ASSERT(strlen(pfx) + 49 <= sizeof(buf));
+
+	col = 0;
+
+	for (i = 0; i < msglen; i++, col++) {
+		if (col % 16 == 0)
+			strcpy(buf, pfx);
+		sprintf(buf + strlen(buf), "%02x", msg[i]);
+		if ((col + 1) % 16 == 0)
+			printf("%s\n", buf);
+		else
+			sprintf(buf + strlen(buf), " ");
+	}
+
+	if (col % 16 != 0)
+		printf("%s\n", buf);
+}
+
+/* Send/Receive an SPI Packet */
+void
+spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+	uint32 count;
+	uint32 spi_data_out;
+	uint32 spi_data_in;
+	bool yield;
+
+	sd_trace(("%s: enter\n", __FUNCTION__));
+
+	if (bcmpcispi_dump) {
+		printf("SENDRECV(len=%d)\n", msglen);
+		hexdump(" OUT: ", msg_out, msglen);
+	}
+
+#ifdef BCMSDYIELD
+	/* Only yield the CPU and wait for interrupt on Rev 8 and newer FPGA images. */
+	yield = ((msglen > 500) && (si->rev >= 8));
+#else
+	yield = FALSE;
+#endif /* BCMSDYIELD */
+
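+	/* Transfers are carried out as whole 32-bit words through the SPI FIFO */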
+	ASSERT(msglen % 4 == 0);
+
+
+	SPIPCI_ANDREG(osh, &regs->spih_gpio_data, ~SPIH_CS);	/* Set GPIO CS# Low (asserted) */
+
+	for (count = 0; count < (uint32)msglen/4; count++) {
+		spi_data_out = ((uint32)((uint32 *)msg_out)[count]);
+		SPIPCI_WREG(osh, &regs->spih_data, spi_data_out);
+	}
+
+#ifdef BCMSDYIELD
+	if (yield) {
+		/* Ack the interrupt in the interrupt controller */
+		SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_WFIFO_INTR);
+		SPIPCI_RREG(osh, &regs->spih_int_status);
+
+		/* Enable the FIFO Empty Interrupt */
+		sd->intmask |= SPIH_WFIFO_INTR;
+		sd->got_hcint = FALSE;
+		SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
+
+	}
+#endif /* BCMSDYIELD */
+
+	/* Wait for write fifo to empty... */
+	SPIPCI_ANDREG(osh, &regs->spih_gpio_data, ~0x00000020);	/* Set GPIO 5 Low */
+
+	if (yield) {
+		ASSERT((SPIPCI_RREG(sd->osh, &regs->spih_stat) & SPIH_WFEMPTY) == 0);
+	}
+
+	spi_waitbits(sd, yield);
+	SPIPCI_ORREG(osh, &regs->spih_gpio_data, 0x00000020);	/* Set GPIO 5 High (de-asserted) */
+
+	for (count = 0; count < (uint32)msglen/4; count++) {
+		spi_data_in = SPIPCI_RREG(osh, &regs->spih_data);
+		((uint32 *)msg_in)[count] = spi_data_in;
+	}
+
+	/* Set GPIO CS# High (de-asserted) */
+	SPIPCI_ORREG(osh, &regs->spih_gpio_data, SPIH_CS);
+
+	if (bcmpcispi_dump) {
+		hexdump(" IN : ", msg_in, msglen);
+	}
+}
+
+void
+spi_spinbits(sdioh_info_t *sd)
+{
+	spih_info_t *si = (spih_info_t *)sd->controller;
+	osl_t *osh = si->osh;
+	spih_regs_t *regs = si->regs;
+	uint spin_count; /* Spin loop bound check */
+
+	spin_count = 0;
+	while ((SPIPCI_RREG(sd->osh, &regs->spih_stat) & SPIH_WFEMPTY) == 0) {
+		if (spin_count > SPI_SPIN_BOUND) {
+			sd_err(("%s: SPIH_WFEMPTY spin count exceeded bound (%u iterations)\n",
+				__FUNCTION__, spin_count));
+			ASSERT(FALSE);
+		}
+		spin_count++;
+	}
+
+	/* Wait for SPI Transfer state machine to return to IDLE state.
+	 * The state bits are only implemented in Rev >= 5 FPGA.  These
+	 * bits are hardwired to 00 for Rev < 5, so this check doesn't cause
+	 * any problems.
+	 */
+	spin_count = 0;
+	while ((SPIPCI_RREG(osh, &regs->spih_stat) & SPIH_STATE_MASK) != 0) {
+		if (spin_count > SPI_SPIN_BOUND) {
+			sd_err(("%s: SPIH_STATE_MASK spin count exceeded bound (%u iterations)\n",
+				__FUNCTION__, spin_count));
+			ASSERT(FALSE);
+		}
+		spin_count++;
+	}
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdh.c b/drivers/net/wireless/bcm4329/bcmsdh.c
new file mode 100644
index 0000000..4bf5889
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh.c
@@ -0,0 +1,652 @@
+/*
+ *  BCMSDH interface glue
+ *  implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c,v 1.35.2.1.4.8.6.13 2010/04/06 03:26:57 Exp $
+ */
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h>	/* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h>	/* common SDIO/controller interface */
+#include <sbsdio.h>	/* BRCM sdio device core */
+
+#include <sdio.h>	/* sdio spec */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+
+struct bcmsdh_info
+{
+	bool	init_success;	/* underlying driver successfully attached */
+	void	*sdioh;		/* handler for sdioh */
+	uint32  vendevid;	/* Target Vendor and Device ID on SD bus */
+	osl_t   *osh;
+	bool	regfail;	/* Save status of last reg_read/reg_write call */
+	uint32	sbwad;		/* Save backplane window address */
+};
+/* local copy of bcm sd handler */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+
+	/* save the handler locally */
+	l_bcmsdh = bcmsdh;
+
+	if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) {
+		bcmsdh_detach(osh, bcmsdh);
+		return NULL;
+	}
+
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+
+	*regsva = (uint32 *)SI_ENUM_BASE;
+
+	/* Report the BAR, to fix if needed */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+	return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+		if (bcmsdh->sdioh) {
+			sdioh_detach(osh, bcmsdh->sdioh);
+			bcmsdh->sdioh = NULL;
+		}
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+	return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+                void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	if (SDIOH_API_SUCCESS(status))
+		return FALSE;
+	else
+		return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	/* not supported yet */
+	return BCME_UNSUPPORTED;
+}
+
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
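+	/* When SDIOH_API_ACCESS_RETRY_LIMIT is defined, the access is retried until it succeeds or the limit is hit */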
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+	             addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer. */
+		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		bcopy(cis, tmp_buf, length);
+		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+			ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff);
+			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += sprintf((char *)ptr, "\n");
+		}
+		MFREE(bcmsdh->osh, tmp_buf, length);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
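+/* Program the SDIO function 1 backplane window registers (SBADDR low/mid/high) from a 32-bit backplane address */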
+static int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address)
+{
+	int err = 0;
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+
+	return err;
+}
+
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
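+	/* Re-program the backplane window only if the target window has changed */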
+	if (bar0 != bcmsdh->sbwad) {
+		if (bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))
+			return 0xFFFFFFFF;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+		SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+			case sizeof(uint8):
+				return (word & 0xff);
+			case sizeof(uint16):
+				return (word & 0xffff);
+			case sizeof(uint32):
+				return word;
+			default:
+				bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+	return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+	             __FUNCTION__, addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bar0 != bcmsdh->sbwad) {
+		if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+			return err;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+	                            addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+	              __FUNCTION__, data, addr, size));
+	return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+	return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if (bar0 != bcmsdh->sbwad) {
+		if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+			return err;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	            __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if (bar0 != bcmsdh->sbwad) {
+		if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+			return err;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+	                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+	                              addr, 4, nbytes, buf, NULL);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
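+	/* Only the Broadcom vendor ID is reported here; the device ID field is left as 0 */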
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+	return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+	return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+	return;
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdh_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_linux.c
new file mode 100644
index 0000000..6d6097b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh_linux.c
@@ -0,0 +1,735 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c,v 1.42.10.10.2.14.4.2 2010/09/15 00:30:11 Exp $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+
+#if defined(OOB_INTR_ONLY)
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* defined(OOB_INTR_ONLY) */
+#if defined(CONFIG_MACH_SANDGATE2G) || defined(CONFIG_MACH_LOGICPD_PXA270)
+#if !defined(BCMPLATFORM_BUS)
+#define BCMPLATFORM_BUS
+#endif /* !defined(BCMPLATFORM_BUS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#include <linux/platform_device.h>
+#endif /* KERNEL_VERSION(2, 6, 19) */
+#endif /* CONFIG_MACH_SANDGATE2G || CONFIG_MACH_LOGICPD_PXA270 */
+
+/**
+ * SDIO Host Controller info
+ */
+typedef struct bcmsdh_hc bcmsdh_hc_t;
+
+struct bcmsdh_hc {
+	bcmsdh_hc_t *next;
+#ifdef BCMPLATFORM_BUS
+	struct device *dev;			/* platform device handle */
+#else
+	struct pci_dev *dev;		/* pci device handle */
+#endif /* BCMPLATFORM_BUS */
+	osl_t *osh;
+	void *regs;			/* SDIO Host Controller address */
+	bcmsdh_info_t *sdh;		/* SDIO Host Controller handle */
+	void *ch;
+	unsigned int oob_irq;
+	unsigned long oob_flags; /* OOB host interrupt flags (edge/level, etc.) */
+	bool oob_irq_registered;
+#if defined(OOB_INTR_ONLY)
+	spinlock_t irq_lock;
+#endif
+};
+static bcmsdh_hc_t *sdhcinfo = NULL;
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL};
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+/* forward declarations */
+int bcmsdh_probe(struct device *dev);
+int bcmsdh_remove(struct device *dev);
+
+EXPORT_SYMBOL(bcmsdh_probe);
+EXPORT_SYMBOL(bcmsdh_remove);
+
+#else
+/* forward declarations */
+static int __devinit bcmsdh_probe(struct device *dev);
+static int __devexit bcmsdh_remove(struct device *dev);
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static struct device_driver bcmsdh_driver = {
+	.name		= "pxa2xx-mci",
+	.bus		= &platform_bus_type,
+	.probe		= bcmsdh_probe,
+	.remove		= bcmsdh_remove,
+	.suspend	= NULL,
+	.resume		= NULL,
+	};
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_probe(struct device *dev)
+{
+	osl_t *osh = NULL;
+	bcmsdh_hc_t *sdhc = NULL;
+	ulong regs = 0;
+	bcmsdh_info_t *sdh = NULL;
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+	struct platform_device *pdev;
+	struct resource *r;
+#endif /* BCMLXSDMMC */
+	int irq = 0;
+	uint32 vendevid;
+	unsigned long irq_flags = 0;
+
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+	pdev = to_platform_device(dev);
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!r || irq == NO_IRQ)
+		return -ENXIO;
+#endif /* BCMLXSDMMC */
+
+#if defined(OOB_INTR_ONLY)
+#ifdef HW_OOB
+	irq_flags = \
+		IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+	 irq_flags = IRQF_TRIGGER_FALLING;
+#endif /* HW_OOB */
+	irq = dhd_customer_oob_irq_map(&irq_flags);
+	if  (irq < 0) {
+		SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__));
+		return 1;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+	/* allocate SDIO Host Controller state info */
+	if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) {
+		SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+		SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+			__FUNCTION__,
+			MALLOCED(osh)));
+		goto err;
+	}
+	bzero(sdhc, sizeof(bcmsdh_hc_t));
+	sdhc->osh = osh;
+
+	sdhc->dev = (void *)dev;
+
+#ifdef BCMLXSDMMC
+	if (!(sdh = bcmsdh_attach(osh, (void *)0,
+	                          (void **)&regs, irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+#else
+	if (!(sdh = bcmsdh_attach(osh, (void *)r->start,
+	                          (void **)&regs, irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* BCMLXSDMMC */
+	sdhc->sdh = sdh;
+	sdhc->oob_irq = irq;
+	sdhc->oob_flags = irq_flags;
+	sdhc->oob_irq_registered = FALSE;	/* to make sure.. */
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&sdhc->irq_lock);
+#endif
+
+	/* chain SDIO Host Controller info together */
+	sdhc->next = sdhcinfo;
+	sdhcinfo = sdhc;
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(sdh);
+
+	/* try to attach to the target device */
+	if (!(sdhc->ch = drvinfo.attach((vendevid >> 16),
+	                                 (vendevid & 0xFFFF), 0, 0, 0, 0,
+	                                (void *)regs, NULL, sdh))) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return 0;
+
+	/* error handling */
+err:
+	if (sdhc) {
+		if (sdhc->sdh)
+			bcmsdh_detach(sdhc->osh, sdhc->sdh);
+		MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	}
+	if (osh)
+		osl_detach(osh);
+	return -ENODEV;
+}
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_remove(struct device *dev)
+{
+	bcmsdh_hc_t *sdhc, *prev;
+	osl_t *osh;
+
+	sdhc = sdhcinfo;
+	drvinfo.detach(sdhc->ch);
+	bcmsdh_detach(sdhc->osh, sdhc->sdh);
+	/* find the SDIO Host Controller state for this dev and unlink it from the list */
+	for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+		if (sdhc->dev == (void *)dev) {
+			if (prev)
+				prev->next = sdhc->next;
+			else
+				sdhcinfo = sdhc->next;
+			break;
+		}
+		prev = sdhc;
+	}
+	if (!sdhc) {
+		SDLX_MSG(("%s: failed\n", __FUNCTION__));
+		return 0;
+	}
+
+
+	/* release SDIO Host Controller info */
+	osh = sdhc->osh;
+	MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	osl_detach(osh);
+
+#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY)
+	dev_set_drvdata(dev, NULL);
+#endif /* !defined(BCMLXSDMMC) */
+
+	return 0;
+}
+
+#else /* BCMPLATFORM_BUS */
+
+#if !defined(BCMLXSDMMC)
+/* forward declarations for PCI probe and remove functions. */
+static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+	{ vendor: PCI_ANY_ID,
+	device: PCI_ANY_ID,
+	subvendor: PCI_ANY_ID,
+	subdevice: PCI_ANY_ID,
+	class: 0,
+	class_mask: 0,
+	driver_data: 0,
+	},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
+
+/**
+ * SDIO Host Controller pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+	node:		{},
+	name:		"bcmsdh",
+	id_table:	bcmsdh_pci_devid,
+	probe:		bcmsdh_pci_probe,
+	remove:		bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	save_state:	NULL,
+#endif
+	suspend:	NULL,
+	resume:		NULL,
+};
+
+
+extern uint sd_pci_slot;	/* Force detection to a particular PCI slot */
+				/* only.  Allows for having multiple WL devices */
+				/* at once in a PC; only one instance of dhd */
+				/* will be usable at a time.  The upper word is */
+				/* the bus number and the lower word is the */
+				/* slot number.  The default value of */
+				/* 0xFFFFffff turns this off. */
+module_param(sd_pci_slot, uint, 0);
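+/*
+ * Illustration only (not part of the original driver comments): with the
+ * encoding described above, forcing detection to PCI bus 1, slot 5 would use
+ *
+ *	sd_pci_slot = (1 << 16) | 5;	 i.e. 0x00010005
+ *
+ * passed as the module parameter; the bus/slot values here are hypothetical.
+ */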
+
+
+/**
+ * Detect supported SDIO Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported SDIO Host
+ * Controller.  If so, attach to it and attach to the target device.
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	osl_t *osh = NULL;
+	bcmsdh_hc_t *sdhc = NULL;
+	ulong regs;
+	bcmsdh_info_t *sdh = NULL;
+	int rc;
+
+	if (sd_pci_slot != 0xFFFFffff) {
+		if (pdev->bus->number != (sd_pci_slot>>16) ||
+			PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+			SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+			          __FUNCTION__,
+			          bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
+			          "Found compatible SDIOHC" :
+			          "Probing unknown device",
+			          pdev->bus->number, PCI_SLOT(pdev->devfn),
+			          pdev->vendor, pdev->device));
+			return -ENODEV;
+		}
+		SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+		          __FUNCTION__,
+		          bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
+		          "Using compatible SDIOHC" :
+		          "WARNING, forced use of unknown device",
+		          pdev->bus->number, PCI_SLOT(pdev->devfn),
+		          pdev->vendor, pdev->device));
+	}
+
+	if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+	    (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+		uint32 config_reg;
+
+		SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+			goto err;
+		}
+
+		config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+		/*
+		 * Set MMC_SD_DIS bit in FlashMedia Controller.
+		 * Disabling the SD/MMC Controller in the FlashMedia Controller
+		 * allows the Standard SD Host Controller to take over control
+		 * of the SD Slot.
+		 */
+		config_reg |= 0x02;
+		OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+		osl_detach(osh);
+	}
+	/* match this pci device with what we support */
+	/* we can't solely rely on this to believe it is our SDIO Host Controller! */
+	if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+		return -ENODEV;
+	}
+
+	/* this is a pci device we might support */
+	SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+		__FUNCTION__,
+		pdev->bus->number, PCI_SLOT(pdev->devfn),
+		PCI_FUNC(pdev->devfn), pdev->irq));
+
+	/* use bcmsdh_query_device() to get the vendor ID of the target device so
+	 * it will eventually appear in the Broadcom string on the console
+	 */
+
+	/* allocate SDIO Host Controller state info */
+	if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+		SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+		SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+			__FUNCTION__,
+			MALLOCED(osh)));
+		goto err;
+	}
+	bzero(sdhc, sizeof(bcmsdh_hc_t));
+	sdhc->osh = osh;
+
+	sdhc->dev = pdev;
+
+	/* map to address where host can access */
+	pci_set_master(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0),
+	                          (void **)&regs, pdev->irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	sdhc->sdh = sdh;
+
+	/* try to attach to the target device */
+	if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
+	                                bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
+	                                (void *)regs, NULL, sdh))) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* chain SDIO Host Controller info together */
+	sdhc->next = sdhcinfo;
+	sdhcinfo = sdhc;
+
+	return 0;
+
+	/* error handling */
+err:
+	if (sdhc) {
+		if (sdhc->sdh)
+			bcmsdh_detach(sdhc->osh, sdhc->sdh);
+		MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	}
+	if (osh)
+		osl_detach(osh);
+	return -ENODEV;
+}
+
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+	bcmsdh_hc_t *sdhc, *prev;
+	osl_t *osh;
+
+	/* find the SDIO Host Controller state for this pdev and take it out from the list */
+	for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+		if (sdhc->dev == pdev) {
+			if (prev)
+				prev->next = sdhc->next;
+			else
+				sdhcinfo = sdhc->next;
+			break;
+		}
+		prev = sdhc;
+	}
+	if (!sdhc)
+		return;
+
+	drvinfo.detach(sdhc->ch);
+
+	bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+	/* release SDIO Host Controller info */
+	osh = sdhc->osh;
+	MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	osl_detach(osh);
+}
+#endif /* BCMLXSDMMC */
+#endif /* BCMPLATFORM_BUS */
+
+extern int sdio_function_init(void);
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error = 0;
+
+	drvinfo = *driver;
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+	SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
+	error = sdio_function_init();
+#else
+	SDLX_MSG(("Intel PXA270 SDIO Driver\n"));
+	error = driver_register(&bcmsdh_driver);
+#endif /* defined(BCMLXSDMMC) */
+	return error;
+#endif /* defined(BCMPLATFORM_BUS) */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (!(error = pci_module_init(&bcmsdh_pci_driver)))
+		return 0;
+#else
+	if (!(error = pci_register_driver(&bcmsdh_pci_driver)))
+		return 0;
+#endif
+
+	SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#endif /* BCMPLATFORM_BUS */
+
+	return error;
+}
+
+extern void sdio_function_cleanup(void);
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (bcmsdh_pci_driver.node.next)
+#endif
+
+#if defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+		driver_unregister(&bcmsdh_driver);
+#endif
+#if defined(BCMLXSDMMC)
+	sdio_function_cleanup();
+#endif /* BCMLXSDMMC */
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+		pci_unregister_driver(&bcmsdh_pci_driver);
+#endif /* BCMPLATFORM_BUS */
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bool enable)
+{
+	static bool curstate = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
+	if (curstate != enable) {
+		if (enable)
+			enable_irq(sdhcinfo->oob_irq);
+		else
+			disable_irq_nosync(sdhcinfo->oob_irq);
+		curstate = enable;
+	}
+	spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	dhd_pub_t *dhdp;
+
+	dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev);
+
+	bcmsdh_oob_intr_set(0);
+
+	if (dhdp == NULL) {
+		SDLX_MSG(("Out of band GPIO interrupt fired way too early\n"));
+		return IRQ_HANDLED;
+	}
+
+	dhdsdio_isr((void *)dhdp->bus);
+
+	return IRQ_HANDLED;
+}
+
+int bcmsdh_register_oob_intr(void * dhdp)
+{
+	int error = 0;
+
+	SDLX_MSG(("%s Enter\n", __FUNCTION__));
+
+/* Example of HW_OOB for HW2: please refer to your host specification */
+/* sdhcinfo->oob_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */
+
+	dev_set_drvdata(sdhcinfo->dev, dhdp);
+
+	if (!sdhcinfo->oob_irq_registered) {
+		SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__, \
+				(int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
+		/* Refer to customer Host IRQ docs about proper irqflags definition */
+		error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags,
+			"bcmsdh_sdmmc", NULL);
+		if (error)
+			return -ENODEV;
+
+		enable_irq_wake(sdhcinfo->oob_irq);
+		sdhcinfo->oob_irq_registered = TRUE;
+	}
+
+	return 0;
+}
+
+void bcmsdh_set_irq(int flag)
+{
+	if (sdhcinfo->oob_irq_registered) {
+		SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag));
+		if (flag) {
+			enable_irq(sdhcinfo->oob_irq);
+			enable_irq_wake(sdhcinfo->oob_irq);
+		} else {
+			disable_irq_wake(sdhcinfo->oob_irq);
+			disable_irq(sdhcinfo->oob_irq);
+		}
+	}
+}
+
+void bcmsdh_unregister_oob_intr(void)
+{
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+
+	if (sdhcinfo->oob_irq_registered) {
+		disable_irq_wake(sdhcinfo->oob_irq);
+		disable_irq(sdhcinfo->oob_irq);	/* just in case.. */
+		free_irq(sdhcinfo->oob_irq, NULL);
+		sdhcinfo->oob_irq_registered = FALSE;
+	}
+}
+#endif /* defined(OOB_INTR_ONLY) */
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c
new file mode 100644
index 0000000..031367b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c
@@ -0,0 +1,1304 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c,v 1.1.2.5.6.30.4.1 2010/09/02 23:12:21 Exp $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <sdioh.h>	/* SDIO Host Controller Specification */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = 512;		/* Default blocksize */
+
+uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK	0x03
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Enable Function 1 */
+	sdio_claim_host(gInstance->func[1]);
+	err_ret = sdio_enable_func(gInstance->func[1]);
+	sdio_release_host(gInstance->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+	}
+
+	return FALSE;
+}
+
+/*
+ *	Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (gInstance == NULL) {
+		sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (sdioh_sdmmc_osinit(sd) != 0) {
+		sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+
+	gInstance->sd = sd;
+
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[1]);
+
+	sd->client_block_size[1] = 64;
+	err_ret = sdio_set_block_size(gInstance->func[1], 64);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+	}
+
+	/* Release host controller F1 */
+	sdio_release_host(gInstance->func[1]);
+
+	if (gInstance->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(gInstance->func[2]);
+
+		sd->client_block_size[2] = sd_f2_blocksize;
+		err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
+		if (err_ret) {
+			sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
+				sd_f2_blocksize));
+		}
+
+		/* Release host controller F2 */
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	sdioh_sdmmc_card_enablefuncs(sd);
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		sdio_claim_host(gInstance->func[2]);
+		sdio_disable_func(gInstance->func[2]);
+		sdio_release_host(gInstance->func[2]);
+
+		/* Disable Function 1 */
+		sdio_claim_host(gInstance->func[1]);
+		sdio_disable_func(gInstance->func[1]);
+		sdio_release_host(gInstance->func[1]);
+
+		/* deregister irq */
+		sdioh_sdmmc_osfree(sd);
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(void)
+{
+	uint8 reg;
+	int err;
+
+	if (gInstance->func[0]) {
+		sdio_claim_host(gInstance->func[0]);
+
+		reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+		if (err) {
+			sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			sdio_release_host(gInstance->func[0]);
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* Enable F1 and F2 interrupts, set master enable */
+		reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);
+
+		sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+		sdio_release_host(gInstance->func[0]);
+
+		if (err) {
+			sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(void)
+{
+	uint8 reg;
+	int err;
+
+	if (gInstance->func[0]) {
+		sdio_claim_host(gInstance->func[0]);
+		reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+		if (err) {
+			sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			sdio_release_host(gInstance->func[0]);
+			return SDIOH_API_RC_FAIL;
+		}
+
+		reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+		/* Disable master interrupt with the last function interrupt */
+		if (!(reg & 0xFE))
+			reg = 0;
+		sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+
+		sdio_release_host(gInstance->func[0]);
+		if (err) {
+			sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (gInstance->func[2]) {
+		sdio_claim_host(gInstance->func[2]);
+		sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	if (gInstance->func[1]) {
+		sdio_claim_host(gInstance->func[1]);
+		sdio_claim_irq(gInstance->func[1], IRQHandler);
+		sdio_release_host(gInstance->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (gInstance->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(gInstance->func[1]);
+		sdio_release_irq(gInstance->func[1]);
+		sdio_release_host(gInstance->func[1]);
+	}
+
+	if (gInstance->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(gInstance->func[2]);
+		sdio_release_irq(gInstance->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	sdioh_disable_func_intr();
+#endif /*  !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints", 	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG, 	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode", 	IOV_SDMODE, 	0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0 },
+	{"sd_rxchain",  IOV_RXCHAIN,    0, 	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
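+
+/*
+ * Note (illustrative, not from the original source): the "sd_blocksize" iovar
+ * packs the function number and block size as ((fn << 16) | size), matching
+ * the decode in IOV_SVAL(IOV_BLOCKSIZE) below.  For example, setting function
+ * 2 to 512-byte blocks would use the value (2 << 16) | 512 = 0x00020200.
+ */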
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                           void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = FALSE;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+		else if (sd_ptr->offset & 2)
+			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+		else
+			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable)
+		data = 3;	/* enable hw oob interrupt */
+	else
+		data = 4;	/* disable hw oob interrupt */
+	data |= 4;		/* Active HIGH */
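+	/*
+	 * Illustrative note (not in the original source): with the values
+	 * above, enabling writes 0x07 (OOB enable bits plus active-high) and
+	 * disabling writes 0x04 (active-high only) to the vendor-specific
+	 * CCCR register 0xf2; the exact register layout is chip-specific.
+	 */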
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, 0xf2, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
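+
+/*
+ * Worked example (hypothetical bytes, not from the original source): if the
+ * three CIS pointer bytes read back LSB-first as 0x34, 0x12 and 0x01, the
+ * lower 24 bits of scratch become 0x011234, and masking with 0x0001FFFF
+ * yields the CIS address 0x11234 (anything left in the unread top byte is
+ * discarded by the mask).
+ */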
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret;
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if(rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers.  Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+				if (gInstance->func[2]) {
+					sdio_claim_host(gInstance->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(gInstance->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(gInstance->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+								err_ret));
+						}
+					}
+					sdio_release_host(gInstance->func[2]);
+				}
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				sdio_claim_host(gInstance->func[func]);
+				/*
+				 * this sdio_f0_writeb() can be replaced with
+				 * another API, depending on MMC driver changes.
+				 * As of this time, this is a temporary one.
+				 */
+				sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(gInstance->func[func]);
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				sdio_claim_host(gInstance->func[func]);
+				sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(gInstance->func[func]);
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			sdio_claim_host(gInstance->func[func]);
+			sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+			sdio_release_host(gInstance->func[func]);
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		sdio_claim_host(gInstance->func[func]);
+
+		if (func == 0) {
+			*byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
+		} else {
+			*byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
+		}
+
+		sdio_release_host(gInstance->func[func]);
+	}
+
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+		                        rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                                   uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[func]);
+
+	if(rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(gInstance->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(gInstance->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(gInstance->func[func]);
+
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+		                        rw ? "Write" : "Read", err_ret));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	uint32	SGCount = 0;
+	int err_ret = 0;
+
+	void *pnext;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[func]);
+	for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+		uint pkt_len = PKTLEN(sd->osh, pnext);
+		pkt_len += 3;
+		pkt_len &= 0xFFFFFFFC;
+
+#ifdef CONFIG_MMC_MSM7X00A
+		if ((pkt_len % 64) == 32) {
+			sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
+			pkt_len += 32;
+		}
+#endif /* CONFIG_MMC_MSM7X00A */
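+		/*
+		 * Illustrative note (not in the original source): on MSM7x00A
+		 * a 96-byte transfer (96 % 64 == 32) is padded up to 128 bytes
+		 * by the adjustment above; lengths that are already a multiple
+		 * of 64, or not congruent to 32, are left unchanged.
+		 */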
+		/* Make sure the packet is aligned properly. If it isn't, then this
+		 * is the fault of sdioh_request_buffer() which is supposed to give
+		 * us something we can work with.
+		 */
+		ASSERT(((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) == 0);
+
+		if ((write) && (!fifo)) {
+			err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				pkt_len);
+		} else if (write) {
+			err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				pkt_len);
+		} else if (fifo) {
+			err_ret = sdio_readsb(gInstance->func[func],
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				addr,
+				pkt_len);
+		} else {
+			err_ret = sdio_memcpy_fromio(gInstance->func[func],
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				addr,
+				pkt_len);
+		}
+
+		if (err_ret) {
+			sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+				__FUNCTION__,
+				(write) ? "TX" : "RX",
+				pnext, SGCount, addr, pkt_len, err_ret));
+		} else {
+			sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+				__FUNCTION__,
+				(write) ? "TX" : "RX",
+				pnext, SGCount, addr, pkt_len));
+		}
+
+		if (!fifo) {
+			addr += pkt_len;
+		}
+		SGCount ++;
+
+	}
+
+	/* Release host controller */
+	sdio_release_host(gInstance->func[func]);
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
+ * then all the packets in the chain must be properly aligned.  If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
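+/*
+ * Illustrative note (not part of the original comment): with DMA_ALIGN_MASK
+ * set to 0x03 above, "aligned" means 4-byte alignment of the packet data
+ * pointer.  A hypothetical pointer ending in ...0x2 fails the check
+ * (0x2 & 0x3 != 0) and is bounced through a freshly allocated aligned packet
+ * (case 2); one ending in ...0x0 or ...0x4 passes and is transferred directly
+ * (case 3).
+ */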
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC Status;
+	void *mypkt = NULL;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Case 1: we don't have a packet. */
+	if (pkt == NULL) {
+		sd_data(("%s: Creating new %s Packet, len=%d\n",
+		         __FUNCTION__, write ? "TX" : "RX", buflen_u));
+#ifdef DHD_USE_STATIC_BUF
+		if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#else
+		if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+			sd_err(("%s: PKTGET failed: len %d\n",
+			           __FUNCTION__, buflen_u));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* For a write, copy the buffer data into the packet. */
+		if (write) {
+			bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
+		}
+
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+		/* For a read, copy the packet data back to the buffer. */
+		if (!write) {
+			bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
+		}
+#ifdef DHD_USE_STATIC_BUF
+		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+	} else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
+		/* Case 2: We have a packet, but it is unaligned. */
+
+		/* In this case, we cannot have a chain. */
+		ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+
+		sd_data(("%s: Creating aligned %s Packet, len=%d\n",
+		         __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
+#ifdef DHD_USE_STATIC_BUF
+		if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#else
+		if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+			sd_err(("%s: PKTGET failed: len %d\n",
+			           __FUNCTION__, PKTLEN(sd->osh, pkt)));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* For a write, copy the buffer data into the packet. */
+		if (write) {
+			bcopy(PKTDATA(sd->osh, pkt),
+			      PKTDATA(sd->osh, mypkt),
+			      PKTLEN(sd->osh, pkt));
+		}
+
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+		/* For a read, copy the packet data back to the buffer. */
+		if (!write) {
+			bcopy(PKTDATA(sd->osh, mypkt),
+			      PKTDATA(sd->osh, pkt),
+			      PKTLEN(sd->osh, mypkt));
+		}
+#ifdef DHD_USE_STATIC_BUF
+		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+	} else { /* case 3: We have a packet and it is aligned. */
+		sd_data(("%s: Aligned %s Packet, direct DMA\n",
+		         __FUNCTION__, write ? "Tx" : "Rx"));
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
+	}
+
+	return (Status);
+}
+
+/* this function performs "abort" for both the host and the device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
+	sd = gInstance->sd;
+
+	ASSERT(sd != NULL);
+	sdio_release_host(gInstance->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(gInstance->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+
+	sd = gInstance->sd;
+
+	ASSERT(sd != NULL);
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *si, int stage)
+{
+	int ret;
+	sdioh_info_t *sd = gInstance->sd;
+
+	/* This needs to be done in stages, as we can't enable the interrupt
+	 * until firmware download is complete; otherwise polled SDIO access
+	 * would get in the way.
+	 */
+	if (gInstance->func[0]) {
+			if (stage == 0) {
+		/* Since the power to the chip was cut, we have to
+		 * re-enumerate the device.  Set the block size and
+		 * enable function 1 in preparation for downloading
+		 * the code.
+		 */
+		/* sdio_reset_comm() has been fixed in the latest kernel/msm.git
+		 * for Linux 2.6.27.  The implementation prior to that is buggy
+		 * and needs Broadcom's patch for it.
+		 */
+		if ((ret = sdio_reset_comm(gInstance->func[0]->card)))
+			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+		else {
+			sd->num_funcs = 2;
+			sd->sd_blockmode = TRUE;
+			sd->use_client_ints = TRUE;
+			sd->client_block_size[0] = 64;
+
+			/* Claim host controller */
+			sdio_claim_host(gInstance->func[1]);
+
+			sd->client_block_size[1] = 64;
+			if (sdio_set_block_size(gInstance->func[1], 64)) {
+				sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+			}
+
+			/* Release host controller F1 */
+			sdio_release_host(gInstance->func[1]);
+
+			if (gInstance->func[2]) {
+				/* Claim host controller F2 */
+				sdio_claim_host(gInstance->func[2]);
+
+				sd->client_block_size[2] = sd_f2_blocksize;
+				if (sdio_set_block_size(gInstance->func[2],
+					sd_f2_blocksize)) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+						"blocksize to %d\n", sd_f2_blocksize));
+				}
+
+				/* Release host controller F2 */
+				sdio_release_host(gInstance->func[2]);
+			}
+
+			sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(gInstance->func[0]);
+			sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+			sdio_claim_irq(gInstance->func[1], IRQHandler);
+			sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr();
+#endif
+			bcmsdh_oob_intr_set(TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *si)
+{
+	/* The MSM7201A Android SDIO stack has a bug with interrupts, so
+	 * internally the SDIO stack polls, which causes issues when the
+	 * device is turned off.  Unregister the interrupt with the SDIO
+	 * stack to stop the polling.
+	 */
+	if (gInstance->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(gInstance->func[0]);
+		sdio_release_irq(gInstance->func[1]);
+		sdio_release_irq(gInstance->func[2]);
+		sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr();
+#endif
+		bcmsdh_oob_intr_set(FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+	return (0);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c
new file mode 100644
index 0000000..5a1a46c
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,269 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c,v 1.1.2.5.6.17 2010/08/13 00:36:19 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h>	/* SDIO Specs */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM		0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT	0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB	0x0492	/* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325	0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319	0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+
+#include <bcmsdh_sdmmc.h>
+
+#include <dhd_dbg.h>
+
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+PBCMSDH_SDMMC_INSTANCE gInstance;
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+	static struct sdio_func sdio_func_0;
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_trace(("sdio_device: 0x%04x\n", func->device));
+	sd_trace(("Function#: 0x%04x\n", func->num));
+
+	if (func->num == 1) {
+		sdio_func_0.num = 0;
+		sdio_func_0.card = func->card;
+		gInstance->func[0] = &sdio_func_0;
+		if(func->device == 0x4) { /* 4318 */
+			gInstance->func[2] = NULL;
+			sd_trace(("NIC found, calling bcmsdh_probe...\n"));
+			ret = bcmsdh_probe(&func->dev);
+		}
+	}
+
+	gInstance->func[func->num] = func;
+
+	if (func->num == 2) {
+		sd_trace(("F2 found, calling bcmsdh_probe...\n"));
+		ret = bcmsdh_probe(&func->dev);
+	}
+
+	return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	if (func->num == 2) {
+		sd_trace(("F2 found, calling bcmsdh_remove...\n"));
+		bcmsdh_remove(&func->dev);
+	}
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+	{ /* end: all zeroes */				},
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+	.probe		= bcmsdh_sdmmc_probe,
+	.remove		= bcmsdh_sdmmc_remove,
+	.name		= "bcmsdh_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+	};
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+};
+
+
+int
+sdioh_sdmmc_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+	sd->sdos_info = (void*)sdos;
+	if (sdos == NULL)
+		return BCME_NOMEM;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+	return BCME_OK;
+}
+
+void
+sdioh_sdmmc_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+#if !defined(OOB_INTR_ONLY)
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+	if (enable) {
+		sdioh_sdmmc_devintr_on(sd);
+	} else {
+		sdioh_sdmmc_devintr_off(sd);
+	}
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+	int error = 0;
+	sdio_function_init();
+	return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int sdio_function_init(void)
+{
+	int error = 0;
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+
+	gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
+	if (!gInstance)
+		return -ENOMEM;
+
+	error = sdio_register_driver(&bcmsdh_sdmmc_driver);
+
+	return error;
+}
+
+/*
+ * module cleanup
+*/
+extern int bcmsdh_remove(struct device *dev);
+void sdio_function_cleanup(void)
+{
+	sd_trace(("%s Enter\n", __FUNCTION__));
+
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+
+	if (gInstance)
+		kfree(gInstance);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdspi.c b/drivers/net/wireless/bcm4329/bcmsdspi.c
new file mode 100644
index 0000000..636539b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdspi.c
@@ -0,0 +1,1596 @@
+/*
+ * Broadcom BCMSDH to SPI Protocol Conversion Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.c,v 1.14.4.2.4.4.6.5 2010/03/10 03:09:48 Exp $
+ */
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <siutils.h>
+#include <sdio.h>		/* SDIO Device and Protocol Specs */
+#include <sdioh.h>		/* SDIO Host Controller Specification */
+#include <bcmsdbus.h>		/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>		/* ioctl/iovars */
+
+#include <pcicfg.h>
+
+
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+
+#include <proto/sdspi.h>
+
+#define SD_PAGE 4096
+
+/* Globals */
+
+uint sd_msglevel = SDH_ERROR_VAL;
+uint sd_hiok = FALSE;		/* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI;		/* Use SPI mode by default */
+uint sd_f2_blocksize = 512;	/* Default blocksize */
+
+uint sd_divisor = 2;		/* Default 33MHz/2 = 16.5MHz for dongle */
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_crc = 0;		/* Default to SPI CRC Check turned OFF */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+
+uint sd_toctl = 7;
+
+/* Prototypes */
+static bool sdspi_start_power(sdioh_info_t *sd);
+static int sdspi_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
+static int sdspi_card_enablefuncs(sdioh_info_t *sd);
+static void sdspi_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
+static int sdspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg,
+                           uint32 *data, uint32 datalen);
+static int sdspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+                              int regsize, uint32 *data);
+static int sdspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               int regsize, uint32 data);
+static int sdspi_driver_init(sdioh_info_t *sd);
+static bool sdspi_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
+static int sdspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                          uint32 addr, int nbytes, uint32 *data);
+static int sdspi_abort(sdioh_info_t *sd, uint func);
+
+static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
+
+static uint8 sdspi_crc7(unsigned char* p, uint32 len);
+static uint16 sdspi_crc16(unsigned char* p, uint32 len);
+static int sdspi_crc_onoff(sdioh_info_t *sd, bool use_crc);
+
+/*
+ *  Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+
+	if (spi_osinit(sd) != 0) {
+		sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->bar0 = (uintptr)bar0;
+	sd->irq = irq;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->sd_blockmode = FALSE;
+	sd->use_client_ints = TRUE;
+	sd->sd_use_dma = FALSE;	/* DMA Not supported */
+
+	/* Haven't figured out how to make bytemode work with dma */
+	if (!sd->sd_blockmode)
+		sd->sd_use_dma = 0;
+
+	if (!spi_hw_attach(sd)) {
+		sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
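+	/* The init is attempted twice: retry once if the first pass fails. */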
+	if (sdspi_driver_init(sd) != SUCCESS) {
+		if (sdspi_driver_init(sd) != SUCCESS) {
+			sd_err(("%s:sdspi_driver_init() failed()\n", __FUNCTION__));
+			spi_hw_detach(sd);
+			spi_osfree(sd);
+			MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+			return (NULL);
+		}
+	}
+
+	if (spi_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+		if (sd->card_init_done)
+			sdspi_reset(sd, 1, 1);
+
+		sd_info(("%s: detaching from hardware\n", __FUNCTION__));
+		spi_free_irq(sd->irq, sd);
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+	*onoff = sd->client_intr_enabled;
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return 0;
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_CRC
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL, 	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE,	0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t)	},
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_crc",	IOV_CRC,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed",	IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode work with DMA */
+		if (!si->sd_blockmode)
+			si->sd_use_dma = 0;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		spi_lock(si);
+		bcmerror = set_client_block_size(si, func, blksize);
+		spi_unlock(si);
+		break;
+	}
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		if (!spi_start_clock(si, (uint16)sd_divisor)) {
+			sd_err(("set clock failed!\n"));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CRC):
+		int_val = (uint32)sd_crc;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CRC):
+		/* Apply new setting, but don't change sd_crc until
+		 * after the CRC-mode is selected in the device.  This
+		 * is required because the software must generate a
+		 * correct CRC for the CMD59 in order to be able to
+		 * turn OFF the CRC.
+		 */
+		sdspi_crc_onoff(si, int_val ? 1 : 0);
+		sd_crc = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+
+		if (!sdspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+			sd_err(("Failed changing highspeed mode to %d.\n", sd_hiok));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)si->local_intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		sd_err(("IOV_HOSTREG unsupported\n"));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		return SDIOH_API_RC_FAIL;
+	}
+
+	spi_lock(sd);
+	*cis = 0;
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdspi_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			spi_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 rsp5;
+
+	spi_lock(sd);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+	cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+	cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
+
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x\n", __FUNCTION__, rw, func, regaddr));
+
+	if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+	                              SDIOH_CMD_52, cmd_arg, NULL, 0)) != SUCCESS) {
+		spi_unlock(sd);
+		return status;
+	}
+
+	sdspi_cmd_getrsp(sd, &rsp5, 1);
+	if (rsp5 != 0x00) {
+		sd_err(("%s: rsp5 flags is 0x%x func=%d\n",
+		        __FUNCTION__, rsp5, func));
+		/* ASSERT(0); */
+		spi_unlock(sd);
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (rw == SDIOH_READ)
+		*byte = sd->card_rsp_data >> 24;
+
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int status;
+
+	spi_lock(sd);
+
+	if (rw == SDIOH_READ)
+		status = sdspi_card_regread(sd, func, addr, nbytes, word);
+	else
+		status = sdspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+	spi_unlock(sd);
+	return (status == SUCCESS ?  SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	int len;
+	int buflen = (int)buflen_u;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+	spi_lock(sd);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks:
+	 * Bytemode: 1 block at a time.
+	 */
+	while (buflen > 0) {
+		if (sd->sd_blockmode) {
+			/* Max xfer is Page size */
+			len = MIN(SD_PAGE, buflen);
+
+			/* Round down to a block boundary */
+			if (buflen > sd->client_block_size[func])
+				len = (len/sd->client_block_size[func]) *
+				        sd->client_block_size[func];
+		} else {
+			/* Byte mode: One block at a time */
+			len = MIN(sd->client_block_size[func], buflen);
+		}
+
+		if (sdspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			spi_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		buffer += len;
+		buflen -= len;
+		if (!fifo)
+			addr += len;
+	}
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+static int
+sdspi_abort(sdioh_info_t *sd, uint func)
+{
+	uint8 spi_databuf[] = { 0x74, 0x80, 0x00, 0x0C, 0xFF, 0x95, 0xFF, 0xFF,
+	                        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+	uint8 spi_rspbuf[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	                       0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+	int err = 0;
+
+	sd_err(("Sending SPI Abort to F%d\n", func));
+	spi_databuf[4] = func & 0x7;
+	/* write to function 0, addr 6 (IOABORT) func # in 3 LSBs. */
+	spi_sendrecv(sd, spi_databuf, spi_rspbuf, sizeof(spi_databuf));
+
+	return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint fnum)
+{
+	int ret;
+
+	spi_lock(sd);
+	ret = sdspi_abort(sd, fnum);
+	spi_unlock(sd);
+
+	return ret;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+
+/*
+ * Private/Static work routines
+ */
+static bool
+sdspi_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
+{
+	if (!sd)
+		return TRUE;
+
+	spi_lock(sd);
+	/* Reset client card */
+	if (client_reset && (sd->adapter_slot != -1)) {
+		if (sdspi_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
+			sd_err(("%s: Cannot write to card reg 0x%x\n",
+			        __FUNCTION__, SDIOD_CCCR_IOABORT));
+		else
+			sd->card_rca = 0;
+	}
+
+	/* The host reset is a NOP in the sd-spi case. */
+	if (host_reset) {
+		sd->sd_mode = SDIOH_MODE_SPI;
+	}
+	spi_unlock(sd);
+	return TRUE;
+}
+
+static int
+sdspi_host_init(sdioh_info_t *sd)
+{
+	sdspi_reset(sd, 1, 0);
+
+	/* Default power-on mode is SPI */
+	sd->sd_mode = SDIOH_MODE_SPI;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = 1;
+
+	return (SUCCESS);
+}
+
+#define CMD0_RETRIES 3
+#define CMD5_RETRIES 10
+
+static int
+get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
+{
+	uint32 rsp5;
+	int retries, status;
+
+	/* First issue a CMD0 to get the card into SPI mode. */
+	for (retries = 0; retries <= CMD0_RETRIES; retries++) {
+		if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+		                              SDIOH_CMD_0, *cmd_arg, NULL, 0)) != SUCCESS) {
+			sd_err(("%s: No response to CMD0\n", __FUNCTION__));
+			continue;
+		}
+
+		sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+		if (GFIELD(rsp5, SPI_RSP_ILL_CMD)) {
+			printf("%s: Card already initialized (continuing)\n", __FUNCTION__);
+			break;
+		}
+
+		if (GFIELD(rsp5, SPI_RSP_IDLE)) {
+			printf("%s: Card in SPI mode\n", __FUNCTION__);
+			break;
+		}
+	}
+
+	if (retries > CMD0_RETRIES) {
+		sd_err(("%s: Too many retries for CMD0\n", __FUNCTION__));
+		return ERROR;
+	}
+
+	/* Get the Card's Operation Condition. */
+	/* Occasionally the board takes a while to become ready. */
+	for (retries = 0; retries <= CMD5_RETRIES; retries++) {
+		if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+		                              SDIOH_CMD_5, *cmd_arg, NULL, 0)) != SUCCESS) {
+			sd_err(("%s: No response to CMD5\n", __FUNCTION__));
+			continue;
+		}
+
+		printf("CMD5 response data was: 0x%08x\n", sd->card_rsp_data);
+
+		if (GFIELD(sd->card_rsp_data, RSP4_CARD_READY)) {
+			printf("%s: Card ready\n", __FUNCTION__);
+			break;
+		}
+	}
+
+	if (retries > CMD5_RETRIES) {
+		sd_err(("%s: Too many retries for CMD5\n", __FUNCTION__));
+		return ERROR;
+	}
+
+	*cmd_rsp = sd->card_rsp_data;
+
+	sdspi_crc_onoff(sd, sd_crc ? 1 : 0);
+
+	return (SUCCESS);
+}
+
+static int
+sdspi_crc_onoff(sdioh_info_t *sd, bool use_crc)
+{
+	uint32 args;
+	int status;
+
+	args = use_crc ? 1 : 0;
+	if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma,
+	                              SDIOH_CMD_59, args, NULL, 0)) != SUCCESS) {
+		sd_err(("%s: No response to CMD59\n", __FUNCTION__));
+	}
+
+	sd_info(("CMD59 response data was: 0x%08x\n", sd->card_rsp_data));
+
+	sd_err(("SD-SPI CRC turned %s\n", use_crc ? "ON" : "OFF"));
+	return (SUCCESS);
+}
+
+static int
+sdspi_client_init(sdioh_info_t *sd)
+{
+	uint8 fn_ints;
+
+	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+	/* Start at ~400KHz clock rate for initialization */
+	if (!spi_start_clock(sd, 128)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+
+	if (!sdspi_start_power(sd)) {
+		sd_err(("sdspi_start_power failed\n"));
+		return ERROR;
+	}
+
+	if (sd->num_funcs == 0) {
+		sd_err(("%s: No IO funcs!\n", __FUNCTION__));
+		return ERROR;
+	}
+
+	sdspi_card_enablefuncs(sd);
+
+	set_client_block_size(sd, 1, BLOCK_SIZE_4318);
+	fn_ints = INTR_CTL_FUNC1_EN;
+
+	if (sd->num_funcs >= 2) {
+		set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
+		fn_ints |= INTR_CTL_FUNC2_EN;
+	}
+
+	/* Enable/Disable Client interrupts */
+	/* Turn on here but disable at host controller */
+	if (sdspi_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
+	                        (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
+		sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
+		return ERROR;
+	}
+
+	/* Switch to High-speed clocking mode if both host and device support it */
+	sdspi_set_highspeed_mode(sd, (bool)sd_hiok);
+
+	/* After configuring for High-Speed mode, set the desired clock rate. */
+	if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+
+	sd->card_init_done = TRUE;
+
+	return SUCCESS;
+}
+
+static int
+sdspi_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
+{
+	uint32 regdata;
+	int status;
+	bool hsmode;
+
+	if (HSMode == TRUE) {
+
+		sd_err(("Attempting to enable High-Speed mode.\n"));
+
+		if ((status = sdspi_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+		                                 1, &regdata)) != SUCCESS) {
+			return status;
+		}
+		if (regdata & SDIO_SPEED_SHS) {
+			sd_err(("Device supports High-Speed mode.\n"));
+
+			regdata |= SDIO_SPEED_EHS;
+
+			sd_err(("Writing %08x to Card at %08x\n",
+			         regdata, SDIOD_CCCR_SPEED_CONTROL));
+			if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+			                                  1, regdata)) != BCME_OK) {
+				return status;
+			}
+
+			hsmode = 1;
+
+			sd_err(("High-speed clocking mode enabled.\n"));
+		}
+		else {
+			sd_err(("Device does not support High-Speed Mode.\n"));
+			hsmode = 0;
+		}
+	} else {
+		if ((status = sdspi_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+		                                 1, &regdata)) != SUCCESS) {
+			return status;
+		}
+
+		regdata &= ~SDIO_SPEED_EHS;
+
+		sd_err(("Writing %08x to Card at %08x\n",
+		         regdata, SDIOD_CCCR_SPEED_CONTROL));
+		if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+		                                  1, regdata)) != BCME_OK) {
+			return status;
+		}
+
+		sd_err(("Low-speed clocking mode enabled.\n"));
+		hsmode = 0;
+	}
+
+	spi_controller_highspeed_mode(sd, hsmode);
+
+	return TRUE;
+}
+
+bool
+sdspi_start_power(sdioh_info_t *sd)
+{
+	uint32 cmd_arg;
+	uint32 cmd_rsp;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's Operation Condition.  Occasionally the board
+	 * takes a while to become ready
+	 */
+
+	cmd_arg = 0;
+	if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
+		sd_err(("%s: Failed to get OCR; bailing\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	sd_err(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
+	sd_err(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
+	sd_err(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
+	sd_err(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+	/* Verify that the card supports I/O mode */
+	if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
+		sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
+
+	/* Examine voltage: Arasan only supports 3.3 volts,
+	 * so look for 3.2-3.3 volts and also 3.3-3.4 volts.
+	 */
+
+	if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
+		sd_err(("This client does not support 3.3 volts!\n"));
+		return FALSE;
+	}
+
+
+	return TRUE;
+}
+
+static int
+sdspi_driver_init(sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if ((sdspi_host_init(sd)) != SUCCESS) {
+		return ERROR;
+	}
+
+	if (sdspi_client_init(sd) != SUCCESS) {
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
+static int
+sdspi_card_enablefuncs(sdioh_info_t *sd)
+{
+	int status;
+	uint32 regdata;
+	uint32 regaddr, fbraddr;
+	uint8 func;
+	uint8 *ptr;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	/* Get the Card's common CIS address */
+	ptr = (uint8 *) &sd->com_cis_ptr;
+	for (regaddr = SDIOD_CCCR_CISPTR_0; regaddr <= SDIOD_CCCR_CISPTR_2; regaddr++) {
+		if ((status = sdspi_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			return status;
+
+		*ptr++ = (uint8) regdata;
+	}
+
+	/* Only the lower 17-bits are valid */
+	sd->com_cis_ptr &= 0x0001FFFF;
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		ptr = (uint8 *) &sd->func_cis_ptr[func];
+		for (regaddr = SDIOD_FBR_CISPTR_0; regaddr <= SDIOD_FBR_CISPTR_2; regaddr++) {
+			if ((status = sdspi_card_regread (sd, 0, regaddr + fbraddr, 1, &regdata))
+			    != SUCCESS)
+				return status;
+
+			*ptr++ = (uint8) regdata;
+		}
+
+		/* Only the lower 17-bits are valid */
+		sd->func_cis_ptr[func] &= 0x0001FFFF;
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	sd_info(("%s: write ESCI bit\n", __FUNCTION__));
+	/* Enable continuous SPI interrupt (ESCI bit) */
+	sdspi_card_regwrite(sd, 0, SDIOD_CCCR_BICTRL, 1, 0x60);
+
+	sd_info(("%s: enable f1\n", __FUNCTION__));
+	/* Enable function 1 on the card */
+	regdata = SDIO_FUNC_ENABLE_1;
+	if ((status = sdspi_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
+		return status;
+
+	sd_info(("%s: done\n", __FUNCTION__));
+	return SUCCESS;
+}
+
+/* Read client card reg */
+static int
+sdspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 rsp5;
+
+	cmd_arg = 0;
+
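+	/* Byte-wide reads (and any func 0 read) use CMD52; wider reads go through CMD53 in byte mode. */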
+	if ((func == 0) || (regsize == 1)) {
+		cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
+
+		if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_52, cmd_arg, NULL, 0))
+		    != SUCCESS)
+			return status;
+
+		sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+		if (rsp5 != 0x00)
+			sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+			        __FUNCTION__, rsp5, func));
+
+		*data = sd->card_rsp_data >> 24;
+	} else {
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+
+		sd->data_xfer_count = regsize;
+
+		/* sdspi_cmd_issue() returns with the command complete bit
+		 * in the ISR already cleared
+		 */
+		if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_53, cmd_arg, NULL, 0))
+		    != SUCCESS)
+			return status;
+
+		sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+		if (rsp5 != 0x00)
+			sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+			        __FUNCTION__, rsp5, func));
+
+		*data = sd->card_rsp_data;
+		if (regsize == 2) {
+			*data &= 0xffff;
+		}
+
+		sd_info(("%s: CMD53 func %d, addr 0x%x, size %d, data 0x%08x\n",
+		         __FUNCTION__, func, regaddr, regsize, *data));
+
+
+	}
+
+	return SUCCESS;
+}
+
+/* write a client register */
+static int
+sdspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+	int status;
+	uint32 cmd_arg, rsp5, flags;
+
+	cmd_arg = 0;
+
+	if ((func == 0) || (regsize == 1)) {
+		cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
+		if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_52, cmd_arg, NULL, 0))
+		    != SUCCESS)
+			return status;
+
+		sdspi_cmd_getrsp(sd, &rsp5, 1);
+		flags = GFIELD(rsp5, RSP5_FLAGS);
+		if (flags && (flags != 0x10))
+			sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
+			        __FUNCTION__,  flags));
+	}
+	else {
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+		sd->data_xfer_count = regsize;
+		sd->cmd53_wr_data = data;
+
+		sd_info(("%s: CMD53 func %d, addr 0x%x, size %d, data 0x%08x\n",
+		         __FUNCTION__, func, regaddr, regsize, data));
+
+		/* sdspi_cmd_issue() returns with the command complete bit
+		 * in the ISR already cleared
+		 */
+		if ((status = sdspi_cmd_issue(sd, sd->sd_use_dma, SDIOH_CMD_53, cmd_arg, NULL, 0))
+		    != SUCCESS)
+			return status;
+
+		sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+		if (rsp5 != 0x00)
+			sd_err(("%s: rsp5 flags = 0x%x, expecting 0x00\n",
+			        __FUNCTION__,  rsp5));
+
+	}
+	return SUCCESS;
+}
+
+void
+sdspi_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
+{
+	*rsp_buffer = sd->card_response;
+}
+
+int max_errors = 0;
+
+#define SPI_MAX_PKT_LEN		768
+uint8	spi_databuf[SPI_MAX_PKT_LEN];
+uint8	spi_rspbuf[SPI_MAX_PKT_LEN];
+
+/* datalen is used for CMD53 buffer transfers only; register accesses pass 0 and rely on sd->data_xfer_count */
+static int
+sdspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg,
+                uint32 *data, uint32 datalen)
+{
+	uint32 cmd_reg;
+	uint32 cmd_arg = arg;
+	uint8 cmd_crc = 0x95;		/* correct CRC for CMD0 and don't care for others. */
+	uint16 dat_crc;
+	uint8 cmd52data = 0;
+	uint32 i, j;
+	uint32 spi_datalen = 0;
+	uint32 spi_pre_cmd_pad	= 0;
+	uint32 spi_max_response_pad = 128;
+
+	cmd_reg = 0;
+	cmd_reg = SFIELD(cmd_reg, SPI_DIR, 1);
+	cmd_reg = SFIELD(cmd_reg, SPI_CMD_INDEX, cmd);
+
+	if (GFIELD(cmd_arg, CMD52_RW_FLAG) == 1) {	/* Same for CMD52 and CMD53 */
+		cmd_reg = SFIELD(cmd_reg, SPI_RW, 1);
+	}
+
+	switch (cmd) {
+	case SDIOH_CMD_59:	/* CRC_ON_OFF (SPI Mode Only) - Response R1 */
+		cmd52data = arg & 0x1;
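+		/* Fall through: CMD59 uses the same SPI framing set up for CMD0/CMD5 below. */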
+	case SDIOH_CMD_0:	/* Set Card to Idle State - No Response */
+	case SDIOH_CMD_5:	/* Send Operation condition - Response R4 */
+		sd_trace(("%s: CMD%d\n", __FUNCTION__, cmd));
+		spi_datalen = 44;
+		spi_pre_cmd_pad = 12;
+		spi_max_response_pad = 28;
+		break;
+
+	case SDIOH_CMD_3:	/* Ask card to send RCA - Response R6 */
+	case SDIOH_CMD_7:	/* Select card - Response R1 */
+	case SDIOH_CMD_15:	/* Set card to inactive state - Response None */
+		sd_err(("%s: CMD%d is invalid for SPI Mode.\n", __FUNCTION__, cmd));
+		return ERROR;
+		break;
+
+	case SDIOH_CMD_52:	/* IO R/W Direct (single byte) - Response R5 */
+		cmd52data = GFIELD(cmd_arg, CMD52_DATA);
+		cmd_arg = arg;
+		cmd_reg = SFIELD(cmd_reg, SPI_FUNC, GFIELD(cmd_arg, CMD52_FUNCTION));
+		cmd_reg = SFIELD(cmd_reg, SPI_ADDR, GFIELD(cmd_arg, CMD52_REG_ADDR));
+		/* Display trace for byte write */
+		if (GFIELD(cmd_arg, CMD52_RW_FLAG) == 1) {
+			sd_trace(("%s: CMD52: Wr F:%d @0x%04x=%02x\n",
+			          __FUNCTION__,
+			          GFIELD(cmd_arg, CMD52_FUNCTION),
+			          GFIELD(cmd_arg, CMD52_REG_ADDR),
+			          cmd52data));
+		}
+
+		spi_datalen = 32;
+		spi_max_response_pad = 28;
+
+		break;
+	case SDIOH_CMD_53:	/* IO R/W Extended (multiple bytes/blocks) */
+		cmd_arg = arg;
+		cmd_reg = SFIELD(cmd_reg, SPI_FUNC, GFIELD(cmd_arg, CMD53_FUNCTION));
+		cmd_reg = SFIELD(cmd_reg, SPI_ADDR, GFIELD(cmd_arg, CMD53_REG_ADDR));
+		cmd_reg = SFIELD(cmd_reg, SPI_BLKMODE, 0);
+		cmd_reg = SFIELD(cmd_reg, SPI_OPCODE, GFIELD(cmd_arg, CMD53_OP_CODE));
+		cmd_reg = SFIELD(cmd_reg, SPI_STUFF0, (sd->data_xfer_count>>8));
+		cmd52data = (uint8)sd->data_xfer_count;
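+		/* The byte count is split: bits above 8 go into SPI_STUFF0, the low 8 bits ride in the data byte. */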
+
+		/* Set upper bit in byte count if necessary, but don't set it for 512 bytes. */
+		if ((sd->data_xfer_count > 255) && (sd->data_xfer_count < 512)) {
+			cmd_reg |= 1;
+		}
+
+		if (GFIELD(cmd_reg, SPI_RW) == 1) { /* Write */
+			spi_max_response_pad = 32;
+			spi_datalen = (sd->data_xfer_count + spi_max_response_pad) & 0xFFFC;
+		} else { /* Read */
+
+			spi_max_response_pad = 32;
+			spi_datalen = (sd->data_xfer_count + spi_max_response_pad) & 0xFFFC;
+		}
+		sd_trace(("%s: CMD53: %s F:%d @0x%04x len=0x%02x\n",
+		          __FUNCTION__,
+		          (GFIELD(cmd_reg, SPI_RW) == 1 ? "Wr" : "Rd"),
+		          GFIELD(cmd_arg, CMD53_FUNCTION),
+		          GFIELD(cmd_arg, CMD53_REG_ADDR),
+		          cmd52data));
+		break;
+
+	default:
+		sd_err(("%s: Unknown command %d\n", __FUNCTION__, cmd));
+		return ERROR;
+	}
+
+	/* Set up and issue the SDIO command */
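+	/* Frame layout: idle padding, then 4 bytes of cmd_reg, one data byte, and a CRC7 byte. */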
+	memset(spi_databuf, SDSPI_IDLE_PAD, spi_datalen);
+	spi_databuf[spi_pre_cmd_pad + 0] = (cmd_reg & 0xFF000000) >> 24;
+	spi_databuf[spi_pre_cmd_pad + 1] = (cmd_reg & 0x00FF0000) >> 16;
+	spi_databuf[spi_pre_cmd_pad + 2] = (cmd_reg & 0x0000FF00) >> 8;
+	spi_databuf[spi_pre_cmd_pad + 3] = (cmd_reg & 0x000000FF);
+	spi_databuf[spi_pre_cmd_pad + 4] = cmd52data;
+
+	/* Generate CRC7 for command, if CRC is enabled, otherwise, a
+	 * default CRC7 of 0x95, which is correct for CMD0, is used.
+	 */
+	if (sd_crc) {
+		cmd_crc = sdspi_crc7(&spi_databuf[spi_pre_cmd_pad], 5);
+	}
+	spi_databuf[spi_pre_cmd_pad + 5] = cmd_crc;
+#define SPI_STOP_TRAN		0xFD
+
+	/* for CMD53 Write, put the data into the output buffer  */
+	if ((cmd == SDIOH_CMD_53) && (GFIELD(cmd_arg, CMD53_RW_FLAG) == 1)) {
+		if (datalen != 0) {
+			spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD;
+			spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK;
+
+			for (i = 0; i < sd->data_xfer_count; i++) {
+				spi_databuf[i + 11 + spi_pre_cmd_pad] = ((uint8 *)data)[i];
+			}
+			if (sd_crc) {
+				dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], i);
+			} else {
+				dat_crc = 0xAAAA;
+			}
+			spi_databuf[i + 11 + spi_pre_cmd_pad] = (dat_crc >> 8) & 0xFF;
+			spi_databuf[i + 12 + spi_pre_cmd_pad] = dat_crc & 0xFF;
+		} else if (sd->data_xfer_count == 2) {
+			spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD;
+			spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK;
+			spi_databuf[spi_pre_cmd_pad + 11]  = sd->cmd53_wr_data & 0xFF;
+			spi_databuf[spi_pre_cmd_pad + 12] = (sd->cmd53_wr_data & 0x0000FF00) >> 8;
+			if (sd_crc) {
+				dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], 2);
+			} else {
+				dat_crc = 0x22AA;
+			}
+			spi_databuf[spi_pre_cmd_pad + 13] = (dat_crc >> 8) & 0xFF;
+			spi_databuf[spi_pre_cmd_pad + 14] = (dat_crc & 0xFF);
+		} else if (sd->data_xfer_count == 4) {
+			spi_databuf[spi_pre_cmd_pad + 9] = SDSPI_IDLE_PAD;
+			spi_databuf[spi_pre_cmd_pad + 10] = SDSPI_START_BLOCK;
+			spi_databuf[spi_pre_cmd_pad + 11]  = sd->cmd53_wr_data & 0xFF;
+			spi_databuf[spi_pre_cmd_pad + 12] = (sd->cmd53_wr_data & 0x0000FF00) >> 8;
+			spi_databuf[spi_pre_cmd_pad + 13] = (sd->cmd53_wr_data & 0x00FF0000) >> 16;
+			spi_databuf[spi_pre_cmd_pad + 14] = (sd->cmd53_wr_data & 0xFF000000) >> 24;
+			if (sd_crc) {
+				dat_crc = sdspi_crc16(&spi_databuf[spi_pre_cmd_pad+11], 4);
+			} else {
+				dat_crc = 0x44AA;
+			}
+			spi_databuf[spi_pre_cmd_pad + 15] = (dat_crc >> 8) & 0xFF;
+			spi_databuf[spi_pre_cmd_pad + 16] = (dat_crc & 0xFF);
+		} else {
+			printf("CMD53 Write: size %d unsupported\n", sd->data_xfer_count);
+		}
+	}
+
+	spi_sendrecv(sd, spi_databuf, spi_rspbuf, spi_datalen);
+
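+	/* Scan past the command echo for the first byte with the start bit clear (the response token). */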
+	for (i = spi_pre_cmd_pad + SDSPI_COMMAND_LEN; i < spi_max_response_pad; i++) {
+		if ((spi_rspbuf[i] & SDSPI_START_BIT_MASK) == 0) {
+			break;
+		}
+	}
+
+	if (i == spi_max_response_pad) {
+		sd_err(("%s: Did not get a response for CMD%d\n", __FUNCTION__, cmd));
+		return ERROR;
+	}
+
+	/* Extract the response. */
+	sd->card_response = spi_rspbuf[i];
+
+	/* for CMD53 Read, find the start of the response data... */
+	if ((cmd == SDIOH_CMD_53) && (GFIELD(cmd_arg, CMD52_RW_FLAG) == 0)) {
+		for (; i < spi_max_response_pad; i++) {
+			if (spi_rspbuf[i] == SDSPI_START_BLOCK) {
+				break;
+			}
+		}
+
+		if (i == spi_max_response_pad) {
+			printf("Did not get a start of data phase for CMD%d\n", cmd);
+			max_errors++;
+			sdspi_abort(sd, GFIELD(cmd_arg, CMD53_FUNCTION));
+		}
+		sd->card_rsp_data = spi_rspbuf[i+1];
+		sd->card_rsp_data |= spi_rspbuf[i+2] << 8;
+		sd->card_rsp_data |= spi_rspbuf[i+3] << 16;
+		sd->card_rsp_data |= spi_rspbuf[i+4] << 24;
+
+		if (datalen != 0) {
+			i++;
+			for (j = 0; j < sd->data_xfer_count; j++) {
+				((uint8 *)data)[j] = spi_rspbuf[i+j];
+			}
+			if (sd_crc) {
+				uint16 recv_crc;
+
+				recv_crc = spi_rspbuf[i+j] << 8 | spi_rspbuf[i+j+1];
+				dat_crc = sdspi_crc16((uint8 *)data, datalen);
+				if (dat_crc != recv_crc) {
+					sd_err(("%s: Incorrect data CRC: expected 0x%04x, "
+					        "received 0x%04x\n",
+					        __FUNCTION__, dat_crc, recv_crc));
+				}
+			}
+		}
+		return SUCCESS;
+	}
+
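+	/* For the non-CMD53 cases, the 32-bit response data follows the token, most significant byte first. */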
+	sd->card_rsp_data = spi_rspbuf[i+4];
+	sd->card_rsp_data |= spi_rspbuf[i+3] << 8;
+	sd->card_rsp_data |= spi_rspbuf[i+2] << 16;
+	sd->card_rsp_data |= spi_rspbuf[i+1] << 24;
+
+	/* Display trace for byte read */
+	if ((cmd == SDIOH_CMD_52) && (GFIELD(cmd_arg, CMD52_RW_FLAG) == 0)) {
+		sd_trace(("%s: CMD52: Rd F:%d @0x%04x=%02x\n",
+		          __FUNCTION__,
+		          GFIELD(cmd_arg, CMD53_FUNCTION),
+		          GFIELD(cmd_arg, CMD53_REG_ADDR),
+		          sd->card_rsp_data >> 24));
+	}
+
+	return SUCCESS;
+}
+
+/*
+ * On entry: if single-block or non-block, buffer size <= block size.
+ * If multi-block, buffer size is unlimited.
+ * Question is how to handle the left-overs in either single- or multi-block.
+ * I think the caller should break the buffer up so this routine will always
+ * use block size == buffer size to handle the end piece of the buffer
+ */
+
+static int
+sdspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 rsp5;
+	int num_blocks, blocksize;
+	bool local_blockmode, local_dma;
+	bool read = rw == SDIOH_READ ? 1 : 0;
+
+	ASSERT(nbytes);
+
+	cmd_arg = 0;
+	sd_data(("%s: %s 53 func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+	         __FUNCTION__, read ? "Rd" : "Wr", func, fifo ? "FIXED" : "INCR",
+	         addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+	if (read) sd->r_cnt++; else sd->t_cnt++;
+
+	local_blockmode = sd->sd_blockmode;
+	local_dma = sd->sd_use_dma;
+
+	/* Don't bother with block mode on small xfers */
+	if (nbytes < sd->client_block_size[func]) {
+		sd_info(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
+		         nbytes, sd->client_block_size[func]));
+		local_blockmode = FALSE;
+		local_dma = FALSE;
+	}
+
+	if (local_blockmode) {
+		blocksize = MIN(sd->client_block_size[func], nbytes);
+		num_blocks = nbytes/blocksize;
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
+	} else {
+		num_blocks =  1;
+		blocksize = nbytes;
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+	}
+
+	if (fifo)
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
+	else
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+
+	cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
+	if (read)
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+	else
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+	sd->data_xfer_count = nbytes;
+	if ((func == 2) && (fifo == 1)) {
+		sd_data(("%s: %s 53 func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+		         __FUNCTION__, read ? "Rd" : "Wr", func, fifo ? "FIXED" : "INCR",
+		         addr, nbytes, sd->r_cnt, sd->t_cnt));
+	}
+
+	/* sdspi_cmd_issue() returns with the command complete bit
+	 * in the ISR already cleared
+	 */
+	if ((status = sdspi_cmd_issue(sd, local_dma,
+	                              SDIOH_CMD_53, cmd_arg,
+	                              data, nbytes)) != SUCCESS) {
+		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
+		return status;
+	}
+
+	sdspi_cmd_getrsp(sd, &rsp5, 1);
+
+	if (rsp5 != 0x00) {
+		sd_err(("%s: rsp5 flags = 0x%x, expecting 0x00\n",
+		        __FUNCTION__,  rsp5));
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
+static int
+set_client_block_size(sdioh_info_t *sd, int func, int block_size)
+{
+	int base;
+	int err = 0;
+
+	sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
+	sd->client_block_size[func] = block_size;
+
+	/* Set the block size in the SDIO Card register */
+	base = func * SDIOD_FBR_SIZE;
+	err = sdspi_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
+	if (!err) {
+		err = sdspi_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_1, 1,
+		                          (block_size >> 8) & 0xff);
+	}
+
+	/*
+	 * Do not set the block size in the SDIO Host register; that
+	 * is func dependent and will get done on an individual
+	 * transaction basis.
+	 */
+
+	return (err ? BCME_SDIO_ERROR : 0);
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+	si->card_init_done = FALSE;
+	return sdspi_client_init(si);
+}
+
+#define CRC7_POLYNOM	0x09
+#define CRC7_CRCHIGHBIT	0x40
+
+static uint8 sdspi_crc7(unsigned char* p, uint32 len)
+{
+	uint8 c, j, bit, crc = 0;
+	uint32 i;
+
+	for (i = 0; i < len; i++) {
+		c = *p++;
+		for (j = 0x80; j; j >>= 1) {
+			bit = crc & CRC7_CRCHIGHBIT;
+			crc <<= 1;
+			if (c & j) bit ^= CRC7_CRCHIGHBIT;
+			if (bit) crc ^= CRC7_POLYNOM;
+		}
+	}
+
+	/* Convert the CRC7 to an 8-bit SD CRC */
+	crc = (crc << 1) | 1;
+
+	return (crc);
+}
+
+#define CRC16_POLYNOM	0x1021
+#define CRC16_CRCHIGHBIT	0x8000
+
+static uint16 sdspi_crc16(unsigned char* p, uint32 len)
+{
+	uint32 i;
+	uint16 j, c, bit;
+	uint16 crc = 0;
+
+	for (i = 0; i < len; i++) {
+		c = *p++;
+		for (j = 0x80; j; j >>= 1) {
+			bit = crc & CRC16_CRCHIGHBIT;
+			crc <<= 1;
+			if (c & j) bit ^= CRC16_CRCHIGHBIT;
+			if (bit) crc ^= CRC16_POLYNOM;
+		}
+	}
+
+	return (crc);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdspi_linux.c b/drivers/net/wireless/bcm4329/bcmsdspi_linux.c
new file mode 100644
index 0000000..e2e0ca6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdspi_linux.c
@@ -0,0 +1,252 @@
+/*
+ * Broadcom SPI Host Controller Driver - Linux Per-port
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi_linux.c,v 1.7.2.1.4.3 2008/06/30 21:09:36 Exp $
+ */
+
+#include <typedefs.h>
+#include <pcicfg.h>
+#include <bcmutils.h>
+
+#include <sdio.h>		/* SDIO Specs */
+#include <bcmsdbus.h>		/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>		/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq(), free_irq() */
+
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+
+extern uint sd_crc;
+module_param(sd_crc, uint, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+	wait_queue_head_t intr_wait_queue;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE()	(!in_atomic())
+#else
+#define BLOCKABLE()	(!in_interrupt())
+#endif
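+/* BLOCKABLE() approximates whether the current context may sleep. */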
+
+/* Interrupt handler */
+static irqreturn_t
+sdspi_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+	sdioh_info_t *sd;
+	struct sdos_info *sdos;
+	bool ours;
+
+	sd = (sdioh_info_t *)dev_id;
+	sd->local_intrcount++;
+
+	if (!sd->card_init_done) {
+		sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+		return IRQ_RETVAL(FALSE);
+	} else {
+		ours = spi_check_client_intr(sd, NULL);
+
+		/* For local interrupts, wake the waiting process */
+		if (ours && sd->got_hcint) {
+			sdos = (struct sdos_info *)sd->sdos_info;
+			wake_up_interruptible(&sdos->intr_wait_queue);
+		}
+
+		return IRQ_RETVAL(ours);
+	}
+}
+
+/* Register with Linux for interrupts */
+int
+spi_register_irq(sdioh_info_t *sd, uint irq)
+{
+	sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+	if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
+		sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+		return ERROR;
+	}
+	return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+spi_free_irq(uint irq, sdioh_info_t *sd)
+{
+	free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+
+uint32 *
+spi_reg_map(osl_t *osh, uintptr addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+spi_reg_unmap(osl_t *osh, uintptr addr, int size)
+{
+	REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+spi_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+	sd->sdos_info = (void*)sdos;
+	if (sdos == NULL)
+		return BCME_NOMEM;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+	init_waitqueue_head(&sdos->intr_wait_queue);
+	return BCME_OK;
+}
+
+void
+spi_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	if (!(sd->host_init_done && sd->card_init_done)) {
+		sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+	if (enable && !sd->lockcount)
+		spi_devintr_on(sd);
+	else
+		spi_devintr_off(sd);
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+spi_lock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (sd->lockcount) {
+		sd_err(("%s: Already locked!\n", __FUNCTION__));
+		ASSERT(sd->lockcount == 0);
+	}
+	spi_devintr_off(sd);
+	sd->lockcount++;
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Enable client interrupt */
+void
+spi_unlock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+	ASSERT(sd->lockcount > 0);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+		spi_devintr_on(sd);
+	}
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+void spi_waitbits(sdioh_info_t *sd, bool yield)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+
+#ifndef BCMSDYIELD
+	ASSERT(!yield);
+#endif
+	sd_trace(("%s: yield %d canblock %d\n",
+	          __FUNCTION__, yield, BLOCKABLE()));
+
+	/* Clear the "interrupt happened" flag and last intrstatus */
+	sd->got_hcint = FALSE;
+
+#ifdef BCMSDYIELD
+	if (yield && BLOCKABLE()) {
+		/* Wait for the indication, the interrupt will be masked when the ISR fires. */
+		wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+	} else
+#endif /* BCMSDYIELD */
+	{
+		spi_spinbits(sd);
+	}
+
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdstd.c b/drivers/net/wireless/bcm4329/bcmsdstd.c
new file mode 100644
index 0000000..0ca1f8f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdstd.c
@@ -0,0 +1,3127 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.c,v 1.64.4.1.4.4.2.18 2010/08/17 17:00:48 Exp $
+ */
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <siutils.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <sdioh.h>	/* SDIO Host Controller Specification */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+#include <pcicfg.h>
+
+
+#define SD_PAGE_BITS	12
+#define SD_PAGE 	(1 << SD_PAGE_BITS)
+
+#include <bcmsdstd.h>
+
+/* Globals */
+uint sd_msglevel = SDH_ERROR_VAL;
+uint sd_hiok = TRUE;			/* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = 64;		/* Default blocksize */
+
+#ifdef BCMSDYIELD
+bool sd_yieldcpu = TRUE;		/* Allow CPU yielding for buffer requests */
+uint sd_minyield = 0;			/* Minimum xfer size to allow CPU yield */
+bool sd_forcerb = FALSE;		/* Force sync readback in intrs_on/off */
+#endif
+
+uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+uint8 sd_dma_mode = DMA_MODE_SDMA; /* Default to SDMA for now */
+
+uint sd_toctl = 7;
+
+static bool trap_errs = FALSE;
+
+static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };
+
+/* Prototypes */
+static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
+static bool sdstd_start_power(sdioh_info_t *sd);
+static bool sdstd_bus_width(sdioh_info_t *sd, int width);
+static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
+static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
+static int sdstd_card_enablefuncs(sdioh_info_t *sd);
+static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
+static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
+static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+                              int regsize, uint32 *data);
+static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               int regsize, uint32 data);
+static int sdstd_driver_init(sdioh_info_t *sd);
+static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
+static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                          uint32 addr, int nbytes, uint32 *data);
+static int sdstd_abort(sdioh_info_t *sd, uint func);
+static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
+static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
+static void sd_map_dma(sdioh_info_t * sd);
+static void sd_unmap_dma(sdioh_info_t * sd);
+static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
+static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
+static void sd_create_adma_descriptor(sdioh_info_t *sd,
+                                      uint32 index, uint32 addr_phys,
+                                      uint16 length, uint16 flags);
+static void sd_dump_adma_dscr(sdioh_info_t *sd);
+static void sdstd_dumpregs(sdioh_info_t *sd);
+
+
+/*
+ * Private register access routines.
+ */
+
+/* 16 bit PCI regs */
+
+extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
+uint16
+sdstd_rreg16(sdioh_info_t *sd, uint reg)
+{
+
+	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+	sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
+	return data;
+}
+
+extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
+void
+sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
+{
+	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+	sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+
+static void
+sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
+{
+	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+	sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
+	data |= val;
+	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+
+}
+static void
+sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
+{
+
+	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
+	sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
+	data &= ~mask;
+	data |= (val & mask);
+	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
+}
+
+
+/* 32 bit PCI regs */
+static uint32
+sdstd_rreg(sdioh_info_t *sd, uint reg)
+{
+	volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
+	sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
+	return data;
+}
+static inline void
+sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
+{
+	*(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
+	sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
+
+}
+
+/* 8 bit PCI regs */
+static inline void
+sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
+{
+	*(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
+	sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
+}
+static uint8
+sdstd_rreg8(sdioh_info_t *sd, uint reg)
+{
+	volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
+	sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
+	return data;
+}
+
+/*
+ * Private work routines
+ */
+
+sdioh_info_t *glob_sd;
+
+/*
+ *  Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	glob_sd = sd;
+	sd->osh = osh;
+	if (sdstd_osinit(sd) != 0) {
+		sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+	sd->mem_space = (volatile char *)sdstd_reg_map(osh, (uintptr)bar0, SDIOH_REG_WINSZ);
+	sd_init_dma(sd);
+	sd->irq = irq;
+	if (sd->mem_space == NULL) {
+		sd_err(("%s:ioremap() failed\n", __FUNCTION__));
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+	sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->sd_dma_mode = sd_dma_mode;
+
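+	/* Non-block (byte) mode has no DMA path in this driver (see the
+	 * sd_blockmode iovar handler below), so fall back to PIO.
+	 */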
+	if (!sd->sd_blockmode)
+		sd->sd_dma_mode = DMA_MODE_NONE;
+
+	if (sdstd_driver_init(sd) != SUCCESS) {
+		/* If the host CPU was reset without resetting the SD bus or
+		   SD device, the device still holds its RCA but the driver
+		   no longer knows it (the driver has been restarted).
+		   Go through init once more to clear the RCA and then reassign it.
+		 */
+		sd_info(("driver_init failed - Reset RCA and try again\n"));
+		if (sdstd_driver_init(sd) != SUCCESS) {
+			sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
+			if (sd->mem_space) {
+				sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+				sd->mem_space = NULL;
+			}
+			sdstd_osfree(sd);
+			MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+			return (NULL);
+		}
+	}
+
+	OSL_DMADDRWIDTH(osh, 32);
+
+	/* Always map DMA buffers, so we can switch between DMA modes. */
+	sd_map_dma(sd);
+
+	if (sdstd_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		sdstd_free_irq(sd->irq, sd);
+		if (sd->mem_space) {
+			sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+			sd->mem_space = NULL;
+		}
+
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_unmap_dma(sd);
+		sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+		sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
+		sdstd_free_irq(sd->irq, sd);
+		if (sd->card_init_done)
+			sdstd_reset(sd, 1, 1);
+		if (sd->mem_space) {
+			sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+			sd->mem_space = NULL;
+		}
+
+		sdstd_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	uint16 intrstatus;
+	intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
+	return !!(intrstatus & CLIENT_INTR);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_YIELDCPU,
+	IOV_MINYIELD,
+	IOV_FORCERB,
+	IOV_CLOCK
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL, 	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE,	0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_UINT32,	0 },
+#ifdef BCMSDYIELD
+	{"sd_yieldcpu",	IOV_YIELDCPU,	0,	IOVT_BOOL,	0 },
+	{"sd_minyield",	IOV_MINYIELD,	0,	IOVT_UINT32,	0 },
+	{"sd_forcerb",	IOV_FORCERB,	0,	IOVT_BOOL,	0 },
+#endif
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t)	},
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed",	IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode work with DMA */
+		if (!si->sd_blockmode)
+			si->sd_dma_mode = DMA_MODE_NONE;
+		break;
+
+#ifdef BCMSDYIELD
+	case IOV_GVAL(IOV_YIELDCPU):
+		int_val = sd_yieldcpu;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_YIELDCPU):
+		sd_yieldcpu = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_MINYIELD):
+		int_val = sd_minyield;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MINYIELD):
+		sd_minyield = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_FORCERB):
+		int_val = sd_forcerb;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FORCERB):
+		sd_forcerb = (bool)int_val;
+		break;
+#endif /* BCMSDYIELD */
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
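+		/* int_val packs the request as ((func << 16) | blocksize); e.g.
+		 * an illustrative value of 0x20040 would ask for 64-byte blocks
+		 * on function 2.
+		 */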
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		sdstd_lock(si);
+		bcmerror = set_client_block_size(si, func, blksize);
+		sdstd_unlock(si);
+		break;
+	}
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_dma_mode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_dma_mode = (char)int_val;
+		sdstd_set_dma_mode(si, si->sd_dma_mode);
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
+			sd_err(("set clock failed!\n"));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		if (sd_power == 1) {
+			if (sdstd_driver_init(si) != SUCCESS) {
+				sd_err(("set SD Slot power failed!\n"));
+				bcmerror = BCME_ERROR;
+			} else {
+				sd_err(("SD Slot Powered ON.\n"));
+			}
+		} else {
+			uint8 pwr = 0;
+
+			pwr = SFIELD(pwr, PWR_BUS_EN, 0);
+			sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
+			sd_err(("SD Slot Powered OFF.\n"));
+		}
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		if (sd_clock == 1) {
+			sd_info(("SD Clock turned ON.\n"));
+			if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
+				sd_err(("sdstd_start_clock failed\n"));
+				bcmerror = BCME_ERROR;
+			}
+		} else {
+			/* turn off HC clock */
+			sdstd_wreg16(si, SD_ClockCntrl,
+			             sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));
+
+			sd_info(("SD Clock turned OFF.\n"));
+		}
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+
+		if (!sdstd_bus_width(si, sd_sdmode)) {
+			sd_err(("sdstd_bus_width failed\n"));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)si->local_intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+		          (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		          sd_ptr->offset));
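+		/* The register width is inferred from the offset's alignment:
+		 * odd offsets are accessed as 8-bit, offsets with bit 1 set as
+		 * 16-bit, and 4-byte-aligned offsets as 32-bit.
+		 */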
+		if (sd_ptr->offset & 1)
+			int_val = sdstd_rreg8(si, sd_ptr->offset);
+		else if (sd_ptr->offset & 2)
+			int_val = sdstd_rreg16(si, sd_ptr->offset);
+		else
+			int_val = sdstd_rreg(si, sd_ptr->offset);
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__,
+		          (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		          sd_ptr->value, sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
+		else if (sd_ptr->offset & 2)
+			sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
+		else
+			sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdstd_lock(sd);
+	*cis = 0;
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			sdstd_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+	sdstd_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 rsp5;
+
+	sdstd_lock(sd);
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+	cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+	cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
+
+	if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
+		sdstd_unlock(sd);
+		return status;
+	}
+
+	sdstd_cmd_getrsp(sd, &rsp5, 1);
+	if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
+		sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
+		        __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+	}
+	if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+		sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+		        __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+	if (GFIELD(rsp5, RSP5_STUFF))
+		sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+		        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+	if (rw == SDIOH_READ)
+		*byte = GFIELD(rsp5, RSP5_DATA);
+
+	sdstd_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int status;
+	bool swap = FALSE;
+
+	sdstd_lock(sd);
+
+	if (rw == SDIOH_READ) {
+		status = sdstd_card_regread(sd, func, addr, nbytes, word);
+		if (swap)
+			*word = BCMSWAP32(*word);
+	} else {
+		if (swap)
+			*word = BCMSWAP32(*word);
+		status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
+	}
+
+	sdstd_unlock(sd);
+	return (status == SUCCESS ?  SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	int len;
+	int buflen = (int)buflen_u;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	uint8 *localbuf = NULL, *tmpbuf = NULL;
+	uint tmplen = 0;
+	bool local_blockmode = sd->sd_blockmode;
+
+	sdstd_lock(sd);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks:
+	 * Bytemode: 1 block at a time.
+	 * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
+	 * Both: leftovers are handled last (will be sent via bytemode).
+	 */
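+	/* Illustration (hypothetical sizes): with a 512-byte client block size
+	 * and SD_PAGE >= buflen, a 1500-byte blockmode write goes out as one
+	 * 1024-byte (two-block) transfer followed by the 476-byte remainder.
+	 */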
+	while (buflen > 0) {
+		if (local_blockmode) {
+			/* Max xfer is Page size */
+			len = MIN(SD_PAGE, buflen);
+
+			/* Round down to a block boundary */
+			if (buflen > sd->client_block_size[func])
+				len = (len/sd->client_block_size[func]) *
+				        sd->client_block_size[func];
+			if ((func == SDIO_FUNC_1) && ((len % 4) == 3) && (rw == SDIOH_WRITE)) {
+				tmplen = len;
+				sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
+				len++;
+				tmpbuf = buffer;
+				if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
+					sd_err(("out of memory, malloced %d bytes\n",
+					        MALLOCED(sd->osh)));
+					sdstd_unlock(sd);
+					return SDIOH_API_RC_FAIL;
+				}
+				bcopy(buffer, localbuf, len);
+				buffer = localbuf;
+			}
+		} else {
+			/* Byte mode: One block at a time */
+			len = MIN(sd->client_block_size[func], buflen);
+		}
+
+		if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			sdstd_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+
+		if (local_blockmode) {
+			if ((func == SDIO_FUNC_1) && ((tmplen % 4) == 3) && (rw == SDIOH_WRITE)) {
+				if (localbuf)
+					MFREE(sd->osh, localbuf, len);
+				len--;
+				buffer = tmpbuf;
+				sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
+			}
+		}
+
+		buffer += len;
+		buflen -= len;
+		if (!fifo)
+			addr += len;
+	}
+	sdstd_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+static int
+sdstd_abort(sdioh_info_t *sd, uint func)
+{
+	int err = 0;
+	int retries;
+
+	uint16 cmd_reg;
+	uint32 cmd_arg;
+	uint32 rsp5;
+	uint8 rflags;
+
+	uint16 int_reg = 0;
+	uint16 plain_intstatus;
+
+	/* Argument is write to F0 (CCCR) IOAbort with function number */
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
+	cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
+	cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
+	cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+	cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);
+
+	/* Command is CMD52 write */
+	cmd_reg = 0;
+	cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
+	cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+	cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+	cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+	cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
+	cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);
+
+	if (sd->sd_mode == SDIOH_MODE_SPI) {
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+	}
+
+	/* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
+	retries = RETRIES_SMALL;
+	while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
+		if (retries == RETRIES_SMALL)
+			sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
+			        __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
+		if (!--retries) {
+			sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
+			        __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
+			if (trap_errs)
+				ASSERT(0);
+			err = BCME_SDIO_ERROR;
+			goto done;
+		}
+	}
+
+	/* Clear errors from any previous commands */
+	if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
+		sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
+		sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
+	}
+	plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
+	if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
+		sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
+		if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
+			sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
+		}
+		if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
+			sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
+			err = BCME_NODEVICE;
+			goto done;
+		}
+	}
+
+	/* Issue the command */
+	sdstd_wreg(sd, SD_Arg0, cmd_arg);
+	sdstd_wreg16(sd, SD_Command, cmd_reg);
+
+	/* In interrupt mode, return now and expect a CMD_COMPLETE interrupt later */
+	if (!sd->polled_mode)
+		return err;
+
+	/* Otherwise, wait for the command to complete */
+	retries = RETRIES_LARGE;
+	do {
+		int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+	} while (--retries &&
+	         (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
+	         (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
+
+	/* If command completion fails, do a cmd reset and note the error */
+	if (!retries) {
+		sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
+		        __FUNCTION__, int_reg,
+		        sdstd_rreg16(sd, SD_ErrorIntrStatus),
+		        sdstd_rreg(sd, SD_PresentState)));
+
+		sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+		retries = RETRIES_LARGE;
+		do {
+			sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+		} while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
+		                 SW_RESET_CMD)) && retries--);
+
+		if (!retries) {
+			sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+		}
+
+		if (trap_errs)
+			ASSERT(0);
+
+		err = BCME_SDIO_ERROR;
+	}
+
+	/* Clear Command Complete interrupt */
+	int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
+	sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+	/* Check for Errors */
+	if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
+		sd_err(("%s: ErrorintrStatus: 0x%x, "
+		        "(intrstatus = 0x%x, present state 0x%x) clearing\n",
+		        __FUNCTION__, plain_intstatus,
+		        sdstd_rreg16(sd, SD_IntrStatus),
+		        sdstd_rreg(sd, SD_PresentState)));
+
+		sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
+
+		sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
+		retries = RETRIES_LARGE;
+		do {
+			sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
+		} while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
+		                 SW_RESET_DAT)) && retries--);
+
+		if (!retries) {
+			sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
+		}
+
+		if (trap_errs)
+			ASSERT(0);
+
+		/* ABORT is dataless, only cmd errs count */
+		if (plain_intstatus & ERRINT_CMD_ERRS)
+			err = BCME_SDIO_ERROR;
+	}
+
+	/* If command failed don't bother looking at response */
+	if (err)
+		goto done;
+
+	/* Otherwise, check the response */
+	sdstd_cmd_getrsp(sd, &rsp5, 1);
+	rflags = GFIELD(rsp5, RSP5_FLAGS);
+
+	if (rflags & SD_RSP_R5_ERRBITS) {
+		sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));
+
+		/* The CRC error flag applies to the previous command */
+		if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
+			err = BCME_SDIO_ERROR;
+			goto done;
+		}
+	}
+
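+	/* Per the SDIO R5 response format, IO_CURRENT_STATE 0x10 means CMD
+	 * (command) state and 0x20 means TRN (transfer) state; anything else
+	 * after an abort is treated as an error.
+	 */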
+	if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
+	    ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
+		sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
+		err = BCME_SDIO_ERROR;
+		goto done;
+	}
+
+	if (GFIELD(rsp5, RSP5_STUFF)) {
+		sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+		        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+		err = BCME_SDIO_ERROR;
+		goto done;
+	}
+
+done:
+	if (err == BCME_NODEVICE)
+		return err;
+
+	sdstd_wreg8(sd, SD_SoftwareReset,
+	            SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));
+
+	retries = RETRIES_LARGE;
+	do {
+		rflags = sdstd_rreg8(sd, SD_SoftwareReset);
+		if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
+			break;
+	} while (--retries);
+
+	if (!retries) {
+		sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
+		        __FUNCTION__, rflags));
+		err = BCME_SDIO_ERROR;
+	}
+
+	return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint fnum)
+{
+	int ret;
+
+	sdstd_lock(sd);
+	ret = sdstd_abort(sd, fnum);
+	sdstd_unlock(sd);
+
+	return ret;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+static int
+sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
+{
+	uint16 regval;
+	uint retries;
+	uint function = 0;
+
+	/* If no errors, we're done */
+	if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
+		return SUCCESS;
+
+	sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
+	        __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
+	        sdstd_rreg(sdioh_info, SD_PresentState)));
+	sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
+
+	/* On command error, issue CMD reset */
+	if (regval & ERRINT_CMD_ERRS) {
+		sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
+		sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+		for (retries = RETRIES_LARGE; retries; retries--)
+			if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
+				break;
+		if (!retries) {
+			sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+		}
+	}
+
+	/* On data error, issue DAT reset */
+	if (regval & ERRINT_DATA_ERRS) {
+		sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
+		sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
+		for (retries = RETRIES_LARGE; retries; retries--)
+			if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
+				break;
+		if (!retries) {
+			sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
+		}
+	}
+
+	/* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
+	if (cmd == SDIOH_CMD_53)
+		function = GFIELD(arg, CMD53_FUNCTION);
+	else if (cmd == SDIOH_CMD_52)
+		function = GFIELD(arg, CMD52_FUNCTION);
+	if (function) {
+		sd_trace(("%s: requesting abort for function %d after cmd %d\n",
+		          __FUNCTION__, function, cmd));
+		sdstd_abort(sdioh_info, function);
+	}
+
+	if (trap_errs)
+		ASSERT(0);
+
+	return ERROR;
+}
+
+
+
+/*
+ * Private/Static work routines
+ */
+static bool
+sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
+{
+	int retries = RETRIES_LARGE;
+	uchar regval;
+
+	if (!sd)
+		return TRUE;
+
+	sdstd_lock(sd);
+	/* Reset client card */
+	if (client_reset && (sd->adapter_slot != -1)) {
+		if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
+			sd_err(("%s: Cannot write to card reg 0x%x\n",
+			        __FUNCTION__, SDIOD_CCCR_IOABORT));
+		else
+			sd->card_rca = 0;
+	}
+
+	/* Reset host controller */
+	if (host_reset) {
+		regval = SFIELD(0, SW_RESET_ALL, 1);
+		sdstd_wreg8(sd, SD_SoftwareReset, regval);
+		do {
+			sd_trace(("%s: waiting for reset\n", __FUNCTION__));
+		} while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
+
+		if (!retries) {
+			sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
+			sdstd_unlock(sd);
+			return (FALSE);
+		}
+
+		/* A reset should reset bus back to 1 bit mode */
+		sd->sd_mode = SDIOH_MODE_SD1;
+		sdstd_set_dma_mode(sd, sd->sd_dma_mode);
+	}
+	sdstd_unlock(sd);
+	return TRUE;
+}
+
+/* Disable device interrupt */
+void
+sdstd_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	if (sd->use_client_ints) {
+		sd->intmask &= ~CLIENT_INTR;
+		sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+		sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+	}
+}
+
+/* Enable device interrupt */
+void
+sdstd_devintr_on(sdioh_info_t *sd)
+{
+	ASSERT(sd->lockcount == 0);
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	if (sd->use_client_ints) {
+		uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
+		sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
+		sdstd_wreg16(sd, SD_IntrStatusEnable, status);
+
+		sd->intmask |= CLIENT_INTR;
+		sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+		sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+	}
+}
+
+#ifdef BCMSDYIELD
+/* Enable/disable other interrupts */
+void
+sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+	if (err) {
+		norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+		sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
+	}
+
+	sd->intmask |= norm;
+	sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+	if (sd_forcerb)
+		sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+
+void
+sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+	if (err) {
+		norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+		sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+	}
+
+	sd->intmask &= ~norm;
+	sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
+	if (sd_forcerb)
+		sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
+}
+#endif /* BCMSDYIELD */
+
+static int
+sdstd_host_init(sdioh_info_t *sd)
+{
+	int 		num_slots, full_slot;
+	uint8		reg8;
+
+	uint32		card_ins;
+	int			slot, first_bar = 0;
+	bool		detect_slots = FALSE;
+	uint		bar;
+
+	/* Check for Arasan ID */
+	if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
+		sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
+		sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
+		detect_slots = TRUE;
+	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
+		sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
+		sd->controller_type = SDIOH_TYPE_BCM27XX;
+		detect_slots = FALSE;
+	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
+		sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
+		sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
+		detect_slots = TRUE;
+	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
+		sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
+			__FUNCTION__));
+		sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
+		detect_slots = TRUE;
+	} else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
+		sd_info(("%s: JMicron Standard SDIO Host Controller\n",
+			__FUNCTION__));
+		sd->controller_type = SDIOH_TYPE_JMICRON;
+		detect_slots = TRUE;
+	} else {
+		return ERROR;
+	}
+
+	/*
+	 * Determine num of slots
+	 * Search each slot
+	 */
+
+	first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
+	num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
+	num_slots &= 7;
+	num_slots++;   	/* map bits to num slots according to spec */
+
+	if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
+	    ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
+		sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
+		/* Set BAR0 Window to SDIOSTH core */
+		OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
+
+		/* Set defaults particular to this controller. */
+		detect_slots = TRUE;
+		num_slots = 1;
+		first_bar = 0;
+
+		/* Controller supports ADMA2, so turn it on here. */
+		sd->sd_dma_mode = DMA_MODE_ADMA2;
+	}
+
+	/* Map in each slot on the board and query it to see if a
+	 * card is inserted.  Use the first populated slot found.
+	 */
+	if (sd->mem_space) {
+		sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+		sd->mem_space = NULL;
+	}
+
+	full_slot = -1;
+
+	for (slot = 0; slot < num_slots; slot++) {
+		bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
+		sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
+		                                               (uintptr)bar, SDIOH_REG_WINSZ);
+
+		sd->adapter_slot = -1;
+
+		if (detect_slots) {
+			card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
+		} else {
+			card_ins = TRUE;
+		}
+
+		if (card_ins) {
+			sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
+			if (full_slot < 0)
+				full_slot = slot;
+		} else {
+			sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
+		}
+
+		if (sd->mem_space) {
+			sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
+			sd->mem_space = NULL;
+		}
+	}
+
+	if (full_slot < 0) {
+		sd_err(("No slots on SDIO controller are populated\n"));
+		return -1;
+	}
+
+	bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
+	sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
+
+	sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
+		full_slot,
+		(full_slot + first_bar),
+		OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
+		sd->mem_space));
+
+
+	sd->adapter_slot = full_slot;
+
+	sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
+	switch (sd->version) {
+		case 0:
+			sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
+				sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
+			break;
+		case 1:
+		case 2:
+			sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
+				sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
+			break;
+		default:
+			sd_err(("%s: Host Controller version 0x%02x not supported.\n",
+			    __FUNCTION__, sd->version));
+			break;
+	}
+
+	sd->caps = sdstd_rreg(sd, SD_Capabilities);	/* Cache this for later use */
+	sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);
+
+	sdstd_set_dma_mode(sd, sd->sd_dma_mode);
+
+
+	sdstd_reset(sd, 1, 0);
+
+	/* Read SD4/SD1 mode */
+	if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
+		if (reg8 & SD4_MODE) {
+			sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
+			        __FUNCTION__,  reg8));
+		}
+	}
+
+	/* Default power on mode is SD1 */
+	sd->sd_mode = SDIOH_MODE_SD1;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = full_slot;
+
+	return (SUCCESS);
+}
+#define CMD5_RETRIES 200
+static int
+get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
+{
+	int retries, status;
+
+	/* Get the Card's Operation Condition.  Occasionally the board
+	 * takes a while to become ready
+	 */
+	retries = CMD5_RETRIES;
+	do {
+		*cmd_rsp = 0;
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
+		    != SUCCESS) {
+			sd_err(("%s: CMD5 failed\n", __FUNCTION__));
+			return status;
+		}
+		sdstd_cmd_getrsp(sd, cmd_rsp, 1);
+		if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
+			sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
+	} while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
+	if (!retries)
+		return ERROR;
+
+	return (SUCCESS);
+}
+
+static int
+sdstd_client_init(sdioh_info_t *sd)
+{
+	uint32 cmd_arg, cmd_rsp;
+	int status;
+	uint8 fn_ints;
+
+
+	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+	/* Clear any pending ints */
+	sdstd_wreg16(sd, SD_IntrStatus, 0x1ff);
+	sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);
+
+	/* Enable both Normal and Error Status.  This does not enable
+	 * interrupts, it only enables the status bits to
+	 * become 'live'
+	 */
+	sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
+	sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);
+
+	sdstd_wreg16(sd, SD_IntrSignalEnable, 0);	  /* Disable ints for now. */
+
+	/* Start at ~400KHz clock rate for initialization */
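+	/* A divisor of 128 keeps the identification clock at or below the SD
+	 * spec's 400 kHz limit for the base clocks this driver handles
+	 * (e.g. roughly 390 kHz from a 50 MHz base clock).
+	 */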
+	if (!sdstd_start_clock(sd, 128)) {
+		sd_err(("sdstd_start_clock failed\n"));
+		return ERROR;
+	}
+	if (!sdstd_start_power(sd)) {
+		sd_err(("sdstd_start_power failed\n"));
+		return ERROR;
+	}
+
+	if (sd->num_funcs == 0) {
+		sd_err(("%s: No IO funcs!\n", __FUNCTION__));
+		return ERROR;
+	}
+
+	/* In SPI mode, issue CMD0 first */
+	if (sd->sd_mode == SDIOH_MODE_SPI) {
+		cmd_arg = 0;
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
+		    != SUCCESS) {
+			sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
+			return status;
+		}
+	}
+
+	if (sd->sd_mode != SDIOH_MODE_SPI) {
+		uint16 rsp6_status;
+
+		/* Card is operational. Ask it to send an RCA */
+		cmd_arg = 0;
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
+		    != SUCCESS) {
+			sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
+			return status;
+		}
+
+		/* Verify the card status returned with the cmd response */
+		sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
+		rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
+		if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
+		    GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
+		    GFIELD(rsp6_status, RSP6STAT_ERROR)) {
+			sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
+			        __FUNCTION__, rsp6_status));
+			return ERROR;
+		}
+
+		/* Save the Card's RCA */
+		sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
+		sd_info(("RCA is 0x%x\n", sd->card_rca));
+
+		if (rsp6_status)
+			sd_err(("raw status is 0x%x\n", rsp6_status));
+
+		/* Select the card */
+		cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
+		    != SUCCESS) {
+			sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
+			return status;
+		}
+		sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
+		if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
+			sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
+			        __FUNCTION__, cmd_rsp));
+			return ERROR;
+		}
+	}
+
+	sdstd_card_enablefuncs(sd);
+
+	if (!sdstd_bus_width(sd, sd_sdmode)) {
+		sd_err(("sdstd_bus_width failed\n"));
+		return ERROR;
+	}
+
+	set_client_block_size(sd, 1, BLOCK_SIZE_4318);
+	fn_ints = INTR_CTL_FUNC1_EN;
+
+	if (sd->num_funcs >= 2) {
+		set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
+		fn_ints |= INTR_CTL_FUNC2_EN;
+	}
+
+	/* Enable/Disable Client interrupts */
+	/* Turn on here but disable at host controller? */
+	if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
+	                        (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
+		sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
+		return ERROR;
+	}
+
+	/* Switch to High-speed clocking mode if both host and device support it */
+	sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
+
+	/* After configuring for High-Speed mode, set the desired clock rate. */
+	if (!sdstd_start_clock(sd, (uint16)sd_divisor)) {
+		sd_err(("sdstd_start_clock failed\n"));
+		return ERROR;
+	}
+
+	sd->card_init_done = TRUE;
+
+	return SUCCESS;
+}
+
+static int
+sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
+{
+	uint32 regdata;
+	int status;
+	uint8 reg8;
+
+	reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+
+
+	if (HSMode == TRUE) {
+		if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
+			sd_err(("Host Controller does not support hi-speed mode.\n"));
+			return BCME_ERROR;
+		}
+
+		sd_info(("Attempting to enable High-Speed mode.\n"));
+
+		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+		                                 1, &regdata)) != SUCCESS) {
+			return BCME_SDIO_ERROR;
+		}
+		if (regdata & SDIO_SPEED_SHS) {
+			sd_info(("Device supports High-Speed mode.\n"));
+
+			regdata |= SDIO_SPEED_EHS;
+
+			sd_info(("Writing %08x to Card at %08x\n",
+			         regdata, SDIOD_CCCR_SPEED_CONTROL));
+			if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+			                                  1, regdata)) != BCME_OK) {
+				return BCME_SDIO_ERROR;
+			}
+
+			if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+			                                 1, &regdata)) != BCME_OK) {
+				return BCME_SDIO_ERROR;
+			}
+
+			sd_info(("Read %08x from Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
+
+			reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);
+
+			sd_err(("High-speed clocking mode enabled.\n"));
+		}
+		else {
+			sd_err(("Device does not support High-Speed Mode.\n"));
+			reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
+		}
+	} else {
+		/* Force off device bit */
+		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+		                                 1, &regdata)) != BCME_OK) {
+			return status;
+		}
+		if (regdata & SDIO_SPEED_EHS) {
+			regdata &= ~SDIO_SPEED_EHS;
+			if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
+			                                  1, regdata)) != BCME_OK) {
+				return status;
+			}
+		}
+
+		sd_err(("High-speed clocking mode disabled.\n"));
+		reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
+	}
+
+	sdstd_wreg8(sd, SD_HostCntrl, reg8);
+
+	return BCME_OK;
+}
+
+/* Select DMA Mode:
+ * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
+ * Otherwise, pick the selected mode if supported.
+ * If not supported, use PIO mode.
+ */
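+/* Example: with DMA_MODE_AUTO and a controller whose capabilities word
+ * reports ADMA2, the switch below settles on DMA_MODE_ADMA2 and programs
+ * SDIOH_ADMA2_MODE into the host-control DMA select bits.
+ */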
+static int
+sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
+{
+	uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
+	int8 prev_dma_mode = sd->sd_dma_mode;
+
+	switch (prev_dma_mode) {
+		case DMA_MODE_AUTO:
+			sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
+			          __FUNCTION__));
+			if (GFIELD(sd->caps, CAP_ADMA2)) {
+				sd->sd_dma_mode = DMA_MODE_ADMA2;
+				dma_sel_bits = SDIOH_ADMA2_MODE;
+			} else if (GFIELD(sd->caps, CAP_ADMA1)) {
+				sd->sd_dma_mode = DMA_MODE_ADMA1;
+				dma_sel_bits = SDIOH_ADMA1_MODE;
+			} else if (GFIELD(sd->caps, CAP_DMA)) {
+				sd->sd_dma_mode = DMA_MODE_SDMA;
+			} else {
+				sd->sd_dma_mode = DMA_MODE_NONE;
+			}
+			break;
+		case DMA_MODE_NONE:
+			sd->sd_dma_mode = DMA_MODE_NONE;
+			break;
+		case DMA_MODE_SDMA:
+			if (GFIELD(sd->caps, CAP_DMA)) {
+				sd->sd_dma_mode = DMA_MODE_SDMA;
+			} else {
+				sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
+				sd->sd_dma_mode = DMA_MODE_NONE;
+			}
+			break;
+		case DMA_MODE_ADMA1:
+			if (GFIELD(sd->caps, CAP_ADMA1)) {
+				sd->sd_dma_mode = DMA_MODE_ADMA1;
+				dma_sel_bits = SDIOH_ADMA1_MODE;
+			} else {
+				sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
+				sd->sd_dma_mode = DMA_MODE_NONE;
+			}
+			break;
+		case DMA_MODE_ADMA2:
+			if (GFIELD(sd->caps, CAP_ADMA2)) {
+				sd->sd_dma_mode = DMA_MODE_ADMA2;
+				dma_sel_bits = SDIOH_ADMA2_MODE;
+			} else {
+				sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
+				sd->sd_dma_mode = DMA_MODE_NONE;
+			}
+			break;
+		case DMA_MODE_ADMA2_64:
+			sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
+			sd->sd_dma_mode = DMA_MODE_NONE;
+			break;
+		default:
+			sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
+			        prev_dma_mode));
+			sd->sd_dma_mode = DMA_MODE_NONE;
+			break;
+	}
+
+	/* clear SysAddr, only used for SDMA */
+	sdstd_wreg(sd, SD_SysAddr, 0);
+
+	sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));
+
+	reg8 = sdstd_rreg8(sd, SD_HostCntrl);
+	reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
+	sdstd_wreg8(sd, SD_HostCntrl, reg8);
+	sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));
+
+	return BCME_OK;
+}
+
+
+bool
+sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
+{
+	uint rc, count;
+	uint16 divisor;
+
+	/* turn off HC clock */
+	sdstd_wreg16(sd, SD_ClockCntrl,
+	             sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /*  Disable the HC clock */
+
+	/* Set divisor */
+
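+	/* Per the SD Host Controller spec (1.0/2.0), the 8-bit SDCLK
+	 * frequency-select field at bits 15:8 divides the base clock by 2*N,
+	 * so the requested divisor is halved before being shifted into place.
+	 */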
+	divisor = (new_sd_divisor >> 1) << 8;
+
+	sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
+	sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
+	sd_info(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
+	         new_sd_divisor, divisor));
+
+	sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_TO_CLKFREQ)));
+
+	if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
+		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+		        ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
+		        ((50 % new_sd_divisor) ? "KHz" : "MHz")));
+	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
+		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+		        ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
+		        ((48 % new_sd_divisor) ? "KHz" : "MHz")));
+	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
+		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
+		        ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
+		        ((33 % new_sd_divisor) ? "KHz" : "MHz")));
+
+	} else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
+	} else {
+		sd_err(("Need to determine divisor for %d MHz clocks\n",
+		        GFIELD(sd->caps, CAP_TO_CLKFREQ)));
+		sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
+		return (FALSE);
+	}
+
+	sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /*  Enable the clock */
+
+	/* Wait for clock to stabilize */
+	rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
+	count = 0;
+	while (!rc) {
+		OSL_DELAY(1);
+		sd_info(("Waiting for clock to become stable 0x%x\n", rc));
+		rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
+		count++;
+		if (count > 10000) {
+			sd_err(("%s: Clocks failed to stabilize after %u attempts\n",
+			        __FUNCTION__, count));
+			return (FALSE);
+		}
+	}
+	/* Turn on clock */
+	sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
+
+	/* Set timeout control (adjust default value based on divisor).
+	 * Disabling timeout interrupts during setting is advised by host spec.
+	 */
+	{
+		uint16 regdata;
+		uint toval;
+
+		toval = sd_toctl;
+		divisor = new_sd_divisor;
+
+		while (toval && !(divisor & 1)) {
+			toval -= 1;
+			divisor >>= 1;
+		}
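+		/* e.g. with the default sd_toctl of 7 and a divisor of 4, the
+		 * loop runs twice and the timeout value written below is 5.
+		 */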
+
+		regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
+		sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
+		sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
+		sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
+	}
+
+	OSL_DELAY(2);
+
+	sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
+
+	return TRUE;
+}
+
+bool
+sdstd_start_power(sdioh_info_t *sd)
+{
+	char *s;
+	uint32 cmd_arg;
+	uint32 cmd_rsp;
+	uint8 pwr = 0;
+	int volts;
+
+	volts = 0;
+	s = NULL;
+	if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
+		volts = 5;
+		s = "1.8";
+	}
+	if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
+		volts = 6;
+		s = "3.0";
+	}
+	if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
+		volts = 7;
+		s = "3.3";
+	}
+
+	pwr = SFIELD(pwr, PWR_VOLTS, volts);
+	pwr = SFIELD(pwr, PWR_BUS_EN, 1);
+	sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
+	sd_info(("Setting Bus Power to %s Volts\n", s));
+
+	/* Wait for power to stabilize, Dongle takes longer than NIC. */
+	OSL_DELAY(250000);
+
+	/* Get the Card's Operation Condition.  Occasionally the board
+	 * takes a while to become ready
+	 */
+	cmd_arg = 0;
+	cmd_rsp = 0;
+	if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
+		sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
+		sdstd_reset(sd, 0, 1);
+		return FALSE;
+	}
+
+	sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
+	sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
+	sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
+	sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+
+	/* Verify that the card supports I/O mode */
+	if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
+		sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
+		return FALSE;
+	}
+	sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
+
+	/* Examine voltage: Arasan only supports 3.3 volts,
+	 * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
+	 */
+
+	if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
+		sd_err(("This client does not support 3.3 volts!\n"));
+		return FALSE;
+	}
+	sd_info(("Leaving bus power at 3.3 Volts\n"));
+
+	cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
+	cmd_rsp = 0;
+	get_ocr(sd, &cmd_arg, &cmd_rsp);
+	sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
+	return TRUE;
+}
+
+bool
+sdstd_bus_width(sdioh_info_t *sd, int new_mode)
+{
+	uint32 regdata;
+	int status;
+	uint8 reg8;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd->sd_mode == new_mode) {
+		sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
+		/* Could exit, but continue just in case... */
+	}
+
+	/* Set client side via reg 0x7 in CCCR */
+	if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
+		return FALSE;
+	regdata &= ~BUS_SD_DATA_WIDTH_MASK;
+	if (new_mode == SDIOH_MODE_SD4) {
+		sd_info(("Changing to SD4 Mode\n"));
+		regdata |= SD4_MODE;
+	} else if (new_mode == SDIOH_MODE_SD1) {
+		sd_info(("Changing to SD1 Mode\n"));
+	} else {
+		sd_err(("SPI Mode not supported by Standard Host Controller\n"));
+	}
+
+	if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
+		return FALSE;
+
+	/* Set host side via Host reg */
+	reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
+	if (new_mode == SDIOH_MODE_SD4)
+		reg8 |= SD4_MODE;
+	sdstd_wreg8(sd, SD_HostCntrl, reg8);
+
+	sd->sd_mode = new_mode;
+
+	return TRUE;
+}
+
+static int
+sdstd_driver_init(sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sdstd_host_init(sd)) != SUCCESS) {
+		return ERROR;
+	}
+
+	if (sdstd_client_init(sd) != SUCCESS) {
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
+static int
+sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+static int
+sdstd_card_enablefuncs(sdioh_info_t *sd)
+{
+	int status;
+	uint32 regdata;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	/* Enable function 1 on the card */
+	regdata = SDIO_FUNC_ENABLE_1;
+	if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
+		return status;
+
+	return SUCCESS;
+}
+
+/* Read client card reg */
+static int
+sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 rsp5;
+
+
+	cmd_arg = 0;
+
+	if ((func == 0) || (regsize == 1)) {
+		cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
+
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+		    != SUCCESS)
+			return status;
+
+		sdstd_cmd_getrsp(sd, &rsp5, 1);
+		if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
+			sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
+			        __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+		}
+
+		if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+			sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+			        __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+		if (GFIELD(rsp5, RSP5_STUFF))
+			sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+			        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+		*data = GFIELD(rsp5, RSP5_DATA);
+	} else {
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+
+		sd->data_xfer_count = regsize;
+
+		/* sdstd_cmd_issue() returns with the command complete bit
+		 * in the ISR already cleared
+		 */
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+		    != SUCCESS)
+			return status;
+
+		sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+		if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+			sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
+			        __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
+
+		if (GFIELD(rsp5, RSP5_STUFF))
+			sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
+			        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+		if (sd->polled_mode) {
+			volatile uint16 int_reg;
+			int retries = RETRIES_LARGE;
+
+			/* Wait for Read Buffer to become ready */
+			do {
+				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+			} while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
+
+			if (!retries) {
+				sd_err(("%s: Timeout on Buf_Read_Ready: "
+				        "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
+				        __FUNCTION__, int_reg,
+				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
+				        sdstd_rreg(sd, SD_PresentState)));
+				sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+				return (ERROR);
+			}
+
+			/* Have Buffer Ready, so clear it and read the data */
+			sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
+			if (regsize == 2)
+				*data = sdstd_rreg16(sd, SD_BufferDataPort0);
+			else
+				*data = sdstd_rreg(sd, SD_BufferDataPort0);
+
+			/* Check Status.
+			 * After the data is read, the Transfer Complete bit should be on
+			 */
+			retries = RETRIES_LARGE;
+			do {
+				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+			} while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+			/* Check for any errors from the data phase */
+			if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+				return ERROR;
+
+			if (!retries) {
+				sd_err(("%s: Timeout on xfer complete: "
+				        "intr 0x%04x err 0x%04x state 0x%08x\n",
+				        __FUNCTION__, int_reg,
+				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
+				        sdstd_rreg(sd, SD_PresentState)));
+				return (ERROR);
+			}
+
+			sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
+		}
+	}
+	if (sd->polled_mode) {
+		if (regsize == 2)
+			*data &= 0xffff;
+	}
+	return SUCCESS;
+}
+
+bool
+check_client_intr(sdioh_info_t *sd)
+{
+	uint16 raw_int, cur_int, old_int;
+
+	raw_int = sdstd_rreg16(sd, SD_IntrStatus);
+	cur_int = raw_int & sd->intmask;
+
+	if (!cur_int) {
+		/* Not an error -- might share interrupts... */
+		return FALSE;
+	}
+
+	if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
+		old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
+		sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
+
+		if (sd->client_intr_enabled && sd->use_client_ints) {
+			sd->intrcount++;
+			ASSERT(sd->intr_handler);
+			ASSERT(sd->intr_handler_arg);
+			(sd->intr_handler)(sd->intr_handler_arg);
+		} else {
+			sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+			        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+		}
+		sdstd_wreg16(sd, SD_IntrStatusEnable, old_int);
+	} else {
+		/* Local interrupt: disable, set flag, and save intrstatus */
+		sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
+		sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
+		sd->local_intrcount++;
+		sd->got_hcint = TRUE;
+		sd->last_intrstatus = cur_int;
+	}
+
+	return TRUE;
+}
+
+void
+sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
+{
+	uint16 int_reg, err_reg;
+	int retries = RETRIES_LARGE;
+
+	do {
+		int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+		err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
+	} while (--retries && !(int_reg & norm) && !(err_reg & err));
+
+	norm |= sd->intmask;
+	if (err_reg & err)
+		norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
+	sd->last_intrstatus = int_reg & norm;
+}
+
+/* write a client register */
+static int
+sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+	int status;
+	uint32 cmd_arg, rsp5, flags;
+
+	cmd_arg = 0;
+
+	if ((func == 0) || (regsize == 1)) {
+		cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+		cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
+		    != SUCCESS)
+			return status;
+
+		sdstd_cmd_getrsp(sd, &rsp5, 1);
+		flags = GFIELD(rsp5, RSP5_FLAGS);
+		if (flags && (flags != 0x10))
+			sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
+			        __FUNCTION__,  flags));
+	}
+	else {
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+		cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+		cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+		sd->data_xfer_count = regsize;
+
+		/* sdstd_cmd_issue() returns with the command complete bit
+		 * in the ISR already cleared
+		 */
+		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
+		    != SUCCESS)
+			return status;
+
+		sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+		if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
+			sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
+			        __FUNCTION__,  GFIELD(rsp5, RSP5_FLAGS)));
+		if (GFIELD(rsp5, RSP5_STUFF))
+			sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+			        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+		if (sd->polled_mode) {
+			uint16 int_reg;
+			int retries = RETRIES_LARGE;
+
+			/* Wait for Write Buffer to become ready */
+			do {
+				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+			} while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
+
+			if (!retries) {
+				sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
+				        "errint: 0x%x PresentState 0x%x\n",
+				        __FUNCTION__, int_reg,
+				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
+				        sdstd_rreg(sd, SD_PresentState)));
+				sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+				return (ERROR);
+			}
+			/* Clear Write Buf Ready bit */
+			int_reg = 0;
+			int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
+			sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+			/* At this point we have Buffer Ready, so write the data */
+			if (regsize == 2)
+				sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
+			else
+				sdstd_wreg(sd, SD_BufferDataPort0, data);
+
+			/* Wait for Transfer Complete */
+			retries = RETRIES_LARGE;
+			do {
+				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+			} while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
+
+			/* Check for any errors from the data phase */
+			if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+				return ERROR;
+
+			if (retries == 0) {
+				sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
+				        "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
+				        __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
+				        int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
+				        sd->r_cnt, sd->t_cnt));
+			}
+			/* Clear the status bits */
+			sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
+		}
+	}
+	return SUCCESS;
+}
+
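+/* Copy up to four 32-bit response words from the host controller's response
+ * registers into the caller's buffer.
+ */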
+void
+sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
+{
+	int rsp_count;
+	int respaddr = SD_Response0;
+
+	if (count > 4)
+		count = 4;
+
+	for (rsp_count = 0; rsp_count < count; rsp_count++) {
+		*rsp_buffer++ = sdstd_rreg(sd, respaddr);
+		respaddr += 4;
+	}
+}
+
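+/* Build the Command and Transfer Mode register values for the given SDIO
+ * command, program the argument, and issue the command. In polled mode this
+ * also waits for command completion and checks for errors.
+ */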
+static int
+sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
+{
+	uint16 cmd_reg;
+	int retries;
+	uint32 cmd_arg;
+	uint16 xfer_reg = 0;
+
+
+	if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
+	    ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
+		sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
+		return ERROR;
+	}
+
+	retries = RETRIES_SMALL;
+	while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
+		if (retries == RETRIES_SMALL)
+			sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
+			        __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
+	}
+	if (!retries) {
+		sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
+		if (trap_errs)
+			ASSERT(0);
+		return ERROR;
+	}
+
+
+	cmd_reg = 0;
+	switch (cmd) {
+	case SDIOH_CMD_0:       /* Set Card to Idle State - No Response */
+		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+		break;
+
+	case SDIOH_CMD_3:	/* Ask card to send RCA - Response R6 */
+		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+		break;
+
+	case SDIOH_CMD_5:	/* Send Operation condition - Response R4 */
+		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+		break;
+
+	case SDIOH_CMD_7:	/* Select card - Response R1 */
+		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+		break;
+
+	case SDIOH_CMD_15:	/* Set card to inactive state - Response None */
+		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
+		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+		break;
+
+	case SDIOH_CMD_52:	/* IO R/W Direct (single byte) - Response R5 */
+
+		sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
+			__FUNCTION__,
+			GFIELD(arg, CMD52_FUNCTION),
+			GFIELD(arg, CMD52_REG_ADDR),
+			GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
+			GFIELD(arg, CMD52_DATA)));
+
+		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+		break;
+
+	case SDIOH_CMD_53:	/* IO R/W Extended (multiple bytes/blocks) */
+
+		sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
+			__FUNCTION__,
+			GFIELD(arg, CMD53_FUNCTION),
+			GFIELD(arg, CMD53_REG_ADDR),
+			GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
+			GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
+			GFIELD(arg, CMD53_BYTE_BLK_CNT),
+			GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
+
+		cmd_arg = arg;
+		xfer_reg = 0;
+
+		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
+		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
+		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
+
+		use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
+
+		if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
+			uint16 blocksize;
+			uint16 blockcount;
+			int func;
+
+			ASSERT(sdioh_info->sd_blockmode);
+
+			func = GFIELD(cmd_arg, CMD53_FUNCTION);
+			blocksize = MIN((int)sdioh_info->data_xfer_count,
+			                sdioh_info->client_block_size[func]);
+			blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+
+			/* data_xfer_count is already set up so that for multi-block mode
+			 * it is the entire buffer length.  For non-block or single-block
+			 * transfers it is < 64 bytes.
+			 */
+			if (use_dma) {
+				switch (sdioh_info->sd_dma_mode) {
+				case DMA_MODE_SDMA:
+					sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
+					      __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
+					     (uint32)sdioh_info->dma_phys));
+					sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+					break;
+				case DMA_MODE_ADMA1:
+				case DMA_MODE_ADMA2:
+					sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
+					sd_create_adma_descriptor(sdioh_info, 0,
+						sdioh_info->dma_phys, blockcount*blocksize,
+						ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
+						ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
+					/* Dump descriptor if DMA debugging is enabled. */
+					if (sd_msglevel & SDH_DMA_VAL) {
+						sd_dump_adma_dscr(sdioh_info);
+					}
+
+					sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
+					           sdioh_info->adma2_dscr_phys);
+					break;
+				default:
+					sd_err(("%s: unsupported DMA mode %d.\n",
+						__FUNCTION__, sdioh_info->sd_dma_mode));
+					break;
+				}
+			}
+
+			sd_trace(("%s: Setting block count %d, block size %d bytes\n",
+			          __FUNCTION__, blockcount, blocksize));
+			sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
+			sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
+
+			xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
+
+			if (sdioh_info->client_block_size[func] != blocksize)
+				set_client_block_size(sdioh_info, 1, blocksize);
+
+			if (blockcount > 1) {
+				xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
+				xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
+				xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+			} else {
+				xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+				xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
+				xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+			}
+
+			if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+			else
+				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+
+			retries = RETRIES_SMALL;
+			while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+			              PRES_DAT_INHIBIT) && --retries)
+				sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+				        __FUNCTION__, cmd));
+			if (!retries) {
+				sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+				if (trap_errs)
+					ASSERT(0);
+				return ERROR;
+			}
+			sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+
+		} else {	/* Non block mode */
+			uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
+			/* The byte/block count field only has 9 bits,
+			 * so, to do a 512-byte bytemode transfer, this
+			 * field will contain 0, but we need to tell the
+			 * controller we're transferring 512 bytes.
+			 */
+			if (bytes == 0) bytes = 512;
+
+			if (use_dma)
+				sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
+
+			/* PCI: Transfer Mode register 0x0c */
+			xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
+			xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
+			if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
+				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
+			else
+				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
+			/* See table 2-8 Host Controller spec ver 1.00 */
+			xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Don't care */
+			xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
+
+			sdstd_wreg16(sdioh_info, SD_BlockSize,  bytes);
+
+			sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
+
+			retries = RETRIES_SMALL;
+			while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
+			              PRES_DAT_INHIBIT) && --retries)
+				sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
+				        __FUNCTION__, cmd));
+			if (!retries) {
+				sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
+				if (trap_errs)
+					ASSERT(0);
+				return ERROR;
+			}
+			sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
+		}
+		break;
+
+	default:
+		sd_err(("%s: Unknown command\n", __FUNCTION__));
+		return ERROR;
+	}
+
+	if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
+		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
+		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
+	}
+
+	/* Setup and issue the SDIO command */
+	sdstd_wreg(sdioh_info, SD_Arg0, arg);
+	sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
+
+	/* If we are in polled mode, wait for the command to complete.
+	 * In interrupt mode, return immediately. The calling function will
+	 * know that the command has completed when the CMDATDONE interrupt
+	 * is asserted
+	 */
+	if (sdioh_info->polled_mode) {
+		uint16 int_reg = 0;
+		int retries = RETRIES_LARGE;
+
+		do {
+			int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
+		} while (--retries &&
+		         (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
+		         (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
+
+		if (!retries) {
+			sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
+			        "error stat 0x%x state 0x%x\n",
+			        __FUNCTION__, int_reg,
+			        sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
+			        sdstd_rreg(sdioh_info, SD_PresentState)));
+
+			/* Attempt to reset CMD line when we get a CMD timeout */
+			sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
+			retries = RETRIES_LARGE;
+			do {
+				sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
+			} while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
+			                 SW_RESET_CMD)) && retries--);
+
+			if (!retries) {
+				sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
+			}
+
+			if (trap_errs)
+				ASSERT(0);
+			return (ERROR);
+		}
+
+		/* Clear Command Complete interrupt */
+		int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
+		sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
+
+		/* Check for Errors */
+		if (sdstd_check_errs(sdioh_info, cmd, arg)) {
+			if (trap_errs)
+				ASSERT(0);
+			return ERROR;
+		}
+	}
+	return SUCCESS;
+}
+
+
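+/* CMD53 data transfer: move nbytes between host and card for the given
+ * function, using block mode for transfers of at least one block and either
+ * PIO through the buffer data port or DMA.
+ */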
+static int
+sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 rsp5;
+	uint16 int_reg, int_bit;
+	uint flags;
+	int num_blocks, blocksize;
+	bool local_blockmode, local_dma;
+	bool read = rw == SDIOH_READ ? 1 : 0;
+	bool yield = FALSE;
+
+	ASSERT(nbytes);
+
+	cmd_arg = 0;
+
+	sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+	         __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+	if (read) sd->r_cnt++; else sd->t_cnt++;
+
+	local_blockmode = sd->sd_blockmode;
+	local_dma = USE_DMA(sd);
+
+	/* Don't bother with block mode on small xfers */
+	if (nbytes < sd->client_block_size[func]) {
+		sd_data(("setting local blockmode to false: nbytes (%d) < block_size (%d)\n",
+		         nbytes, sd->client_block_size[func]));
+		local_blockmode = FALSE;
+		local_dma = FALSE;
+	}
+
+	if (local_blockmode) {
+		blocksize = MIN(sd->client_block_size[func], nbytes);
+		num_blocks = nbytes/blocksize;
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
+	} else {
+		num_blocks =  1;
+		blocksize = nbytes;
+		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
+		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
+	}
+
+	if (local_dma && !read) {
+		bcopy(data, sd->dma_buf, nbytes);
+		sd_sync_dma(sd, read, nbytes);
+	}
+
+	if (fifo)
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
+	else
+		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
+
+	cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
+	if (read)
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
+	else
+		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
+
+	sd->data_xfer_count = nbytes;
+
+	/* sdstd_cmd_issue() returns with the command complete bit
+	 * in the ISR already cleared
+	 */
+	if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
+		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
+		return status;
+	}
+
+	sdstd_cmd_getrsp(sd, &rsp5, 1);
+
+	if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
+		sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
+		        "numblocks %d, blocksize %d\n",
+		        __FUNCTION__, nbytes, local_dma, local_blockmode, read,
+		        num_blocks, blocksize));
+
+		if (flags & 1)
+			sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
+			        "bytes %d blockmode %d\n",
+			        __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
+			        GFIELD(cmd_arg, CMD53_BLK_MODE)));
+		if (flags & 0x8)
+			sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
+
+		sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
+		        __FUNCTION__,  flags));
+		if (trap_errs)
+			ASSERT(0);
+		return ERROR;
+	}
+
+	if (GFIELD(rsp5, RSP5_STUFF))
+		sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
+		        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
+
+#ifdef BCMSDYIELD
+	yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
+#endif
+
+	if (!local_dma) {
+		int bytes, i;
+		uint32 tmp;
+		for (i = 0; i < num_blocks; i++) {
+			int words;
+
+			/* Decide which status bit we're waiting for */
+			if (read)
+				int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
+			else
+				int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
+
+			/* If not on, wait for it (or for xfer error) */
+			int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+			if (!(int_reg & int_bit))
+				int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield);
+
+			/* Confirm we got the bit w/o error */
+			if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
+				sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
+				        "errint: 0x%x PresentState 0x%x\n",
+				        __FUNCTION__, read ? "Read" : "Write", int_reg,
+				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
+				        sdstd_rreg(sd, SD_PresentState)));
+				sdstd_dumpregs(sd);
+				sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
+				return (ERROR);
+			}
+
+			/* Clear Buf Ready bit */
+			sdstd_wreg16(sd, SD_IntrStatus, int_bit);
+
+			/* At this point we have Buffer Ready, so transfer the data 4 bytes at a time */
+			for (words = blocksize/4; words; words--) {
+				if (read)
+					*data = sdstd_rreg(sd, SD_BufferDataPort0);
+				else
+					sdstd_wreg(sd, SD_BufferDataPort0, *data);
+				data++;
+			}
+
+			bytes = blocksize % 4;
+
+			/* If no leftover bytes, go to next block */
+			if (!bytes)
+				continue;
+
+			switch (bytes) {
+			case 1:
+				/* R/W 8 bits */
+				if (read)
+					*(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
+				else
+					sdstd_wreg8(sd, SD_BufferDataPort0,
+					            (uint8)(*(data++) & 0xff));
+				break;
+			case 2:
+				/* R/W 16 bits */
+				if (read)
+					*(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+				else
+					sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
+				break;
+			case 3:
+				/* R/W 24 bits:
+				 * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
+				 */
+				if (read) {
+					tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
+					tmp |= ((uint32)(sdstd_rreg8(sd,
+					                             SD_BufferDataPort1)) << 16);
+					*(data++) = tmp;
+				} else {
+					tmp = *(data++);
+					sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
+					sdstd_wreg8(sd, SD_BufferDataPort1,
+					            (uint8)((tmp >> 16) & 0xff));
+				}
+				break;
+			default:
+				sd_err(("%s: Unexpected bytes leftover %d\n",
+				        __FUNCTION__, bytes));
+				ASSERT(0);
+				break;
+			}
+		}
+	}	/* End PIO processing */
+
+	/* Wait for Transfer Complete or Transfer Error */
+	int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);
+
+	/* If not on, wait for it (or for xfer error) */
+	int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+	if (!(int_reg & int_bit))
+		int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield);
+
+	/* Check for any errors from the data phase */
+	if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
+		return ERROR;
+
+	/* If not blocking, we may have hit a software timeout; re-check the status */
+	int_reg = sdstd_rreg16(sd, SD_IntrStatus);
+	if (!(int_reg & int_bit)) {
+		sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
+		        "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
+		        __FUNCTION__, read ? "R" : "W", local_dma,
+		        sdstd_rreg(sd, SD_PresentState), int_reg,
+		        sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
+		        sd->r_cnt, sd->t_cnt));
+		sdstd_dumpregs(sd);
+		return ERROR;
+	}
+
+	/* Clear the status bits */
+	int_reg = int_bit;
+	if (local_dma) {
+		/* DMA Complete */
+		/* Reads in particular don't have DMA_COMPLETE set */
+		int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
+	}
+	sdstd_wreg16(sd, SD_IntrStatus, int_reg);
+
+	/* Fetch data */
+	if (local_dma && read) {
+		sd_sync_dma(sd, read, nbytes);
+		bcopy(sd->dma_buf, data, nbytes);
+	}
+	return SUCCESS;
+}
+
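+/* Record the per-function block size and program it into the card's
+ * FBR block-size registers via single-byte register writes.
+ */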
+static int
+set_client_block_size(sdioh_info_t *sd, int func, int block_size)
+{
+	int base;
+	int err = 0;
+
+
+	sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
+	sd->client_block_size[func] = block_size;
+
+	/* Set the block size in the SDIO Card register */
+	base = func * SDIOD_FBR_SIZE;
+	err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
+	if (!err) {
+		err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1,
+		                          (block_size >> 8) & 0xff);
+	}
+
+	/* Do not set the block size in the SDIO host register; that is
+	 * function-dependent and is programmed on a per-transaction basis.
+	 */
+
+	return (err ? BCME_SDIO_ERROR : 0);
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+	uint8 hreg;
+
+	/* Reset the attached device (use slower clock for safety) */
+	sdstd_start_clock(si, 128);
+	sdstd_reset(si, 0, 1);
+
+	/* Reset portions of the host state accordingly */
+	hreg = sdstd_rreg8(si, SD_HostCntrl);
+	hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
+	hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
+	si->sd_mode = SDIOH_MODE_SD1;
+
+	/* Reinitialize the card */
+	si->card_init_done = FALSE;
+	return sdstd_client_init(si);
+}
+
+
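+/* Allocate page-aligned consistent-DMA buffers: one for data transfers and
+ * one for the ADMA2 descriptor table. On failure, DMA support is disabled
+ * and the driver falls back to PIO.
+ */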
+static void
+sd_map_dma(sdioh_info_t * sd)
+{
+
+	void *va;
+
+	if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE,
+		&sd->dma_start_phys, 0x12, 12)) == NULL) {
+		sd->sd_dma_mode = DMA_MODE_NONE;
+		sd->dma_start_buf = 0;
+		sd->dma_buf = (void *)0;
+		sd->dma_phys = 0;
+		sd->alloced_dma_size = SD_PAGE;
+		sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
+	} else {
+		sd->dma_start_buf = va;
+		sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+		sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
+		sd->alloced_dma_size = SD_PAGE;
+		sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%lx\n",
+		        __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, sd->dma_phys));
+		sd_fill_dma_data_buf(sd, 0xA5);
+	}
+
+	if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE,
+		&sd->adma2_dscr_start_phys, 0x12, 12)) == NULL) {
+		sd->sd_dma_mode = DMA_MODE_NONE;
+		sd->adma2_dscr_start_buf = 0;
+		sd->adma2_dscr_buf = (void *)0;
+		sd->adma2_dscr_phys = 0;
+		sd->alloced_adma2_dscr_size = 0;
+		sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
+		        "Disabling DMA support.\n", __FUNCTION__));
+	} else {
+		sd->adma2_dscr_start_buf = va;
+		sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
+		sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
+		sd->alloced_adma2_dscr_size = SD_PAGE;
+	}
+
+	sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%lx\n",
+	        __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
+	        sd->adma2_dscr_phys));
+	sd_clear_adma_dscr_buf(sd);
+}
+
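+/* Free the consistent-DMA data and ADMA2 descriptor buffers allocated by sd_map_dma() */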
+static void
+sd_unmap_dma(sdioh_info_t * sd)
+{
+	if (sd->dma_start_buf) {
+		DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
+			sd->dma_start_phys, 0x12);
+	}
+
+	if (sd->adma2_dscr_start_buf) {
+		DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size,
+		                    sd->adma2_dscr_start_phys, 0x12);
+	}
+}
+
+static void sd_clear_adma_dscr_buf(sdioh_info_t *sd)
+{
+	bzero((char *)sd->adma2_dscr_buf, SD_PAGE);
+	sd_dump_adma_dscr(sd);
+}
+
+static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
+{
+	memset((char *)sd->dma_buf, data, SD_PAGE);
+}
+
+
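+/* Fill in the DMA descriptor at 'index': a single 32-bit ADMA2 descriptor,
+ * or an ADMA1 length/address descriptor pair (hence the index doubling).
+ */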
+static void sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
+                                      uint32 addr_phys, uint16 length, uint16 flags)
+{
+	adma2_dscr_32b_t *adma2_dscr_table;
+	adma1_dscr_t *adma1_dscr_table;
+
+	adma2_dscr_table = sd->adma2_dscr_buf;
+	adma1_dscr_table = sd->adma2_dscr_buf;
+
+	switch (sd->sd_dma_mode) {
+		case DMA_MODE_ADMA2:
+			sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
+				__FUNCTION__, index));
+
+			adma2_dscr_table[index].phys_addr = addr_phys;
+			adma2_dscr_table[index].len_attr = length << 16;
+			adma2_dscr_table[index].len_attr |= flags;
+			break;
+		case DMA_MODE_ADMA1:
+			/* ADMA1 requires two descriptors, one for len
+			 * and the other for data transfer
+			 */
+			index <<= 1;
+
+			sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
+				__FUNCTION__, index));
+
+			adma1_dscr_table[index].phys_addr_attr = length << 12;
+			adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET |
+			                                           ADMA2_ATTRIBUTE_VALID);
+			adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000;
+			adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f);
+			break;
+		default:
+			sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
+				__FUNCTION__, sd->sd_dma_mode));
+			break;
+	}
+}
+
+
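+/* Walk the ADMA descriptor table, printing each entry until the first
+ * descriptor without the VALID attribute, following LINK descriptors.
+ */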
+static void sd_dump_adma_dscr(sdioh_info_t *sd)
+{
+	adma2_dscr_32b_t *adma2_dscr_table;
+	adma1_dscr_t *adma1_dscr_table;
+	uint32 i = 0;
+	uint16 flags;
+	char flags_str[32];
+
+	ASSERT(sd->adma2_dscr_buf != NULL);
+
+	adma2_dscr_table = sd->adma2_dscr_buf;
+	adma1_dscr_table = sd->adma2_dscr_buf;
+
+	switch (sd->sd_dma_mode) {
+		case DMA_MODE_ADMA2:
+			sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
+				SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
+			sd_err((" #[Descr VA  ]  Buffer PA  | Len    | Flags  (5:4  2   1   0)"
+			        "     |\n"));
+			while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
+				flags = adma2_dscr_table->len_attr & 0xFFFF;
+				sprintf(flags_str, "%s%s%s%s",
+					((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+					ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+					((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+					ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+					((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+					ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP  " : "RSV  ",
+					(flags & ADMA2_ATTRIBUTE_INT ? "INT " : "    "),
+					(flags & ADMA2_ATTRIBUTE_END ? "END " : "    "),
+					(flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+				sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
+				        i, adma2_dscr_table, adma2_dscr_table->phys_addr,
+				        adma2_dscr_table->len_attr >> 16, flags, flags_str));
+				i++;
+
+				/* Follow LINK descriptors or skip to next. */
+				if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+				     ADMA2_ATTRIBUTE_ACT_LINK) {
+					adma2_dscr_table = phys_to_virt(
+					    adma2_dscr_table->phys_addr);
+				} else {
+					adma2_dscr_table++;
+				}
+
+			}
+			break;
+		case DMA_MODE_ADMA1:
+			sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
+			         SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
+			sd_err((" #[Descr VA  ]  Buffer PA  | Flags  (5:4  2   1   0)     |\n"));
+
+			for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
+				flags = adma1_dscr_table->phys_addr_attr & 0x3F;
+				sprintf(flags_str, "%s%s%s%s",
+					((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+					ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
+					((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+					ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
+					((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+					ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP  " : "SET  ",
+					(flags & ADMA2_ATTRIBUTE_INT ? "INT " : "    "),
+					(flags & ADMA2_ATTRIBUTE_END ? "END " : "    "),
+					(flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
+				sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
+				        i, adma1_dscr_table,
+				        adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
+				        flags, flags_str));
+
+				/* Follow LINK descriptors or skip to next. */
+				if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
+				     ADMA2_ATTRIBUTE_ACT_LINK) {
+					adma1_dscr_table = phys_to_virt(
+						adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
+				} else {
+					adma1_dscr_table++;
+				}
+			}
+			break;
+		default:
+			sd_err(("Unknown DMA Descriptor Table Format.\n"));
+			break;
+	}
+}
+
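+/* Dump the interrupt status/enable/signal registers for debugging */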
+static void sdstd_dumpregs(sdioh_info_t *sd)
+{
+	sd_err(("IntrStatus:       0x%04x ErrorIntrStatus       0x%04x\n",
+	            sdstd_rreg16(sd, SD_IntrStatus),
+	            sdstd_rreg16(sd, SD_ErrorIntrStatus)));
+	sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
+	            sdstd_rreg16(sd, SD_IntrStatusEnable),
+	            sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
+	sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
+	            sdstd_rreg16(sd, SD_IntrSignalEnable),
+	            sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
+}
diff --git a/drivers/net/wireless/bcm4329/bcmsdstd_linux.c b/drivers/net/wireless/bcm4329/bcmsdstd_linux.c
new file mode 100644
index 0000000..a8b98e2
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmsdstd_linux.c
@@ -0,0 +1,251 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver - linux portion
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd_linux.c,v 1.11.18.2.16.1 2010/08/17 17:03:13 Exp $
+ */
+
+#include <typedefs.h>
+#include <pcicfg.h>
+#include <bcmutils.h>
+#include <sdio.h>	/* SDIO Specs */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq() */
+
+#include <bcmsdstd.h>
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+	wait_queue_head_t intr_wait_queue;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE()	(!in_atomic())
+#else
+#define BLOCKABLE()	(!in_interrupt())
+#endif
+
+/* Interrupt handler */
+static irqreturn_t
+sdstd_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+	sdioh_info_t *sd;
+	struct sdos_info *sdos;
+	bool ours;
+
+	sd = (sdioh_info_t *)dev_id;
+
+	if (!sd->card_init_done) {
+		sd_err(("%s: Bogus interrupt, card not initialized: irq %d\n", __FUNCTION__, irq));
+		return IRQ_RETVAL(FALSE);
+	} else {
+		ours = check_client_intr(sd);
+
+		/* For local interrupts, wake the waiting process */
+		if (ours && sd->got_hcint) {
+			sd_trace(("INTR->WAKE\n"));
+			sdos = (struct sdos_info *)sd->sdos_info;
+			wake_up_interruptible(&sdos->intr_wait_queue);
+		}
+		return IRQ_RETVAL(ours);
+	}
+}
+
+/* Register with Linux for interrupts */
+int
+sdstd_register_irq(sdioh_info_t *sd, uint irq)
+{
+	sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+	if (request_irq(irq, sdstd_isr, IRQF_SHARED, "bcmsdstd", sd) < 0) {
+		sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+		return ERROR;
+	}
+	return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+sdstd_free_irq(uint irq, sdioh_info_t *sd)
+{
+	free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+
+uint32 *
+sdstd_reg_map(osl_t *osh, int32 addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+sdstd_reg_unmap(osl_t *osh, int32 addr, int size)
+{
+	REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+sdstd_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+	sd->sdos_info = (void*)sdos;
+	if (sdos == NULL)
+		return BCME_NOMEM;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+	init_waitqueue_head(&sdos->intr_wait_queue);
+	return BCME_OK;
+}
+
+void
+sdstd_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	if (!(sd->host_init_done && sd->card_init_done)) {
+		sd_err(("%s: Host and card are not initialized - bailing\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+	if (enable && !sd->lockcount)
+		sdstd_devintr_on(sd);
+	else
+		sdstd_devintr_off(sd);
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+sdstd_lock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (sd->lockcount) {
+		sd_err(("%s: Already locked! called from %p\n",
+		       __FUNCTION__,
+		       __builtin_return_address(0)));
+		ASSERT(sd->lockcount == 0);
+	}
+	sdstd_devintr_off(sd);
+	sd->lockcount++;
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Drop the lock; re-enable device interrupts if client interrupts are enabled */
+void
+sdstd_unlock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+	ASSERT(sd->lockcount > 0);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+		sdstd_devintr_on(sd);
+	}
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
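+/* Wait for any of the requested normal or error interrupt bits. If yielding
+ * is allowed (BCMSDYIELD) and we can block, sleep on the interrupt wait queue;
+ * otherwise spin via sdstd_spinbits(). Returns the latched interrupt status.
+ */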
+uint16
+sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+
+#ifndef BCMSDYIELD
+	ASSERT(!yield);
+#endif
+	sd_trace(("%s: int 0x%02x err 0x%02x yield %d canblock %d\n",
+	          __FUNCTION__, norm, err, yield, BLOCKABLE()));
+
+	/* Clear the "interrupt happened" flag and last intrstatus */
+	sd->got_hcint = FALSE;
+	sd->last_intrstatus = 0;
+
+#ifdef BCMSDYIELD
+	if (yield && BLOCKABLE()) {
+		/* Enable interrupts, wait for the indication, then disable */
+		sdstd_intrs_on(sd, norm, err);
+		wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+		sdstd_intrs_off(sd, norm, err);
+	} else
+#endif /* BCMSDYIELD */
+	{
+		sdstd_spinbits(sd, norm, err);
+	}
+
+	sd_trace(("%s: last_intrstatus 0x%04x\n", __FUNCTION__, sd->last_intrstatus));
+
+	return sd->last_intrstatus;
+}
diff --git a/drivers/net/wireless/bcm4329/bcmspibrcm.c b/drivers/net/wireless/bcm4329/bcmspibrcm.c
new file mode 100644
index 0000000..0f131a4
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmspibrcm.c
@@ -0,0 +1,1726 @@
+/*
+ * Broadcom BCMSDH to gSPI Protocol Conversion Layer
+ *
+ * Copyright (C) 2010, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: bcmspibrcm.c,v 1.11.2.10.2.9.6.11 2009/05/21 13:21:57 Exp $
+ */
+
+#define HSMODE
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#include <sbsdio.h>
+#include <spid.h>
+
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+#include <sdio.h>
+
+#include <pcicfg.h>
+
+
+#include <bcmspibrcm.h>
+#include <bcmspi.h>
+
+#define F0_RESPONSE_DELAY	16
+#define F1_RESPONSE_DELAY	16
+#define F2_RESPONSE_DELAY	F0_RESPONSE_DELAY
+
+#define CMDLEN		4
+
+#define DWORDMODE_ON ((sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE))
+
+/* Globals */
+uint sd_msglevel = 0;
+
+uint sd_hiok = FALSE;		/* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI;		/* Use gSPI mode by default */
+uint sd_f2_blocksize = 64;		/* Default blocksize */
+
+
+uint sd_divisor = 2;
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_crc = 0;		/* Default to SPI CRC Check turned OFF */
+
+uint8	spi_outbuf[SPI_MAX_PKT_LEN];
+uint8	spi_inbuf[SPI_MAX_PKT_LEN];
+
+/* A 128-byte buffer is enough to clear data-not-available and program the response-delay
+ * F0 bits, assuming the F0 response delay does not exceed 100 bytes at 48MHz.
+ */
+#define BUF2_PKT_LEN	128
+uint8	spi_outbuf2[BUF2_PKT_LEN];
+uint8	spi_inbuf2[BUF2_PKT_LEN];
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                           uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+                              int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                          uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+                                 uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ *  Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (spi_osinit(sd) != 0) {
+		sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->bar0 = bar0;
+	sd->irq = irq;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->use_client_ints = TRUE;
+	sd->sd_use_dma = FALSE;	/* DMA Not supported */
+
+	/* The SPI device defaults to 16-bit mode (wordlen = 2); change wordlen to 4
+	 * when the device is switched to 32-bit mode.
+	 */
+	sd->wordlen = 2;
+
+	if (!spi_hw_attach(sd)) {
+		sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (bcmspi_driver_init(sd) != SUCCESS) {
+		sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (spi_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+		spi_free_irq(sd->irq, sd);
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure the callback to the client invoked when a client interrupt is received */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return 0;
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_query_device(sdioh_info_t *sd)
+{
+	/* Return a BRCM ID appropriate to the dongle class */
+	return (sd->num_funcs > 1) ? BCM4329_D11NDUAL_ID : BCM4318_D11G_ID;
+}
+
+/* Provide the dstatus bits of the last SPI transaction to the DHD layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+	return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+	sd->chip = chip;
+	sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+	uint8 reg = 0;
+	int status;
+
+	if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		reg |= DWORD_PKT_LEN_EN;
+		sd->dwordmode = TRUE;
+		sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+	} else {
+		reg &= ~DWORD_PKT_LEN_EN;
+		sd->dwordmode = FALSE;
+		sd->client_block_size[SPI_FUNC_2] = 2048;
+	}
+
+	if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+}
+
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_SPIERRSTATS,
+	IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL, 	0,	IOVT_UINT32,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t)	},
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed",	IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+	{"spi_respdelay",	IOV_RESP_DELAY_ALL,	0,	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+/*
+	sdioh_regs_t *regs;
+*/
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		if (!spi_start_clock(si, (uint16)sd_divisor)) {
+			sd_err(("%s: set clock failed\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+
+		if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+			sd_err(("%s: Failed changing highspeed mode to %d.\n",
+			        __FUNCTION__, sd_hiok));
+			bcmerror = BCME_ERROR;
+			return ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)si->local_intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+
+	case IOV_GVAL(IOV_SPIERRSTATS):
+	{
+		bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SPIERRSTATS):
+	{
+		bzero(&si->spierrstats, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_GVAL(IOV_RESP_DELAY_ALL):
+		int_val = (int32)si->resp_delay_all;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RESP_DELAY_ALL):
+		si->resp_delay_all = (bool)int_val;
+		int_val = STATUS_ENABLE|INTR_WITH_STATUS;
+		if (si->resp_delay_all)
+			int_val |= RESP_DELAY_ALL;
+		else {
+			if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1,
+			     F1_RESPONSE_DELAY) != SUCCESS) {
+				sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				break;
+			}
+		}
+
+		if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val)
+		     != SUCCESS) {
+			sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+
+	if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) {
+		uint8 dummy_data;
+		status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data);
+		if (status) {
+			sd_err(("sdioh_cfg_read() failed.\n"));
+			return status;
+		}
+	}
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 cis_byte;
+	uint16 *cis = (uint16 *)cisd;
+	uint bar0 = SI_ENUM_BASE;
+	int status;
+	uint8 data;
+
+	sd_trace(("%s: Func %d\n", __FUNCTION__, func));
+
+	spi_lock(sd);
+
+	/* Set sb window address to 0x18000000 */
+	data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK;
+	status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data);
+	if (status == SUCCESS) {
+		data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+	if (status == SUCCESS) {
+		data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+
+	offset =  CC_OTP; /* OTP offset in chipcommon. */
+	for (count = 0; count < length/2; count++) {
+		if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			spi_unlock(sd);
+			return (BCME_ERROR);
+		}
+
+		*cis = (uint16)cis_byte;
+		cis++;
+		offset += 2;
+	}
+
+	spi_unlock(sd);
+
+	return (BCME_OK);
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	spi_lock(sd);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, rw, func,
+	         regaddr, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma,
+	                              cmd_arg, &data, 1)) != SUCCESS) {
+		spi_unlock(sd);
+		return status;
+	}
+
+	if (rw == SDIOH_READ)
+		*byte = (uint8)data;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int status;
+
+	spi_lock(sd);
+
+	if (rw == SDIOH_READ)
+		status = bcmspi_card_regread(sd, func, addr, nbytes, word);
+	else
+		status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+	spi_unlock(sd);
+	return (status == SUCCESS ?  SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
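+/* Buffer read/write entry point: split the transfer into chunks of at most
+ * the function's block size and hand each chunk to bcmspi_card_buf().
+ */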
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	int len;
+	int buflen = (int)buflen_u;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+	spi_lock(sd);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks. */
+	while (buflen > 0) {
+		len = MIN(sd->client_block_size[func], buflen);
+		if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			sd_err(("%s: bcmspi_card_buf %s failed\n",
+				__FUNCTION__, rw == SDIOH_READ ? "Read" : "Write"));
+			spi_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		buffer += len;
+		buflen -= len;
+		if (!fifo)
+			addr += len;
+	}
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* This function allows a write to the gSPI bus while another read/write function is deep
+ * down the call stack. Its main aim is to keep such SPI writes simple rather than recursive,
+ * e.g. when the response delay must be programmed on the fly after detecting the SPI function,
+ * this call allows the response delay to be programmed directly.
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+	uint32 cmd_arg;
+	uint32 datalen = 1;
+	uint32 hostlen;
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf2 = bcmswap32(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint16 *)spi_outbuf2 = bcmswap16(cmd_arg & 0xffff);
+		*(uint16 *)&spi_outbuf2[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16);
+		if (datalen & 0x1)
+			datalen++;
+	} else {
+		sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer  */
+	if (datalen != 0) {
+			if (sd->wordlen == 4) { /* 32bit spid */
+				*(uint32 *)&spi_outbuf2[CMDLEN] = bcmswap32(byte);
+			} else if (sd->wordlen == 2) { /* 16bit spid */
+				*(uint16 *)&spi_outbuf2[CMDLEN] = bcmswap16(byte & 0xffff);
+				*(uint16 *)&spi_outbuf2[CMDLEN + 2] =
+					bcmswap16((byte & 0xffff0000) >> 16);
+			}
+	}
+
+	/* +4 for cmd, +4 for dstatus */
+	hostlen = datalen + 8;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16));
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+	if (sd->resp_delay_all == FALSE)
+		return (BCME_OK);
+
+	if (sd->prev_fun == func)
+		return (BCME_OK);
+
+	if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+		return (BCME_OK);
+
+	bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+	/* Remember function for which to avoid reprogramming resp-delay in next iteration */
+	sd->prev_fun = func;
+
+	return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN	0x0
+
+/* A resync pattern is a 32bit MOSI line with all zeros. It is a special command in gSPI.
+ * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is
+ * synchronised and all queued requests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+	uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode (16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	*(uint32 *)spi_outbuf2 = cmd_arg;
+
+	/* for Write, put the data into the output buffer  */
+	*(uint32 *)&spi_outbuf2[CMDLEN] = data;
+
+	/* +4 for cmd, +4 for dstatus */
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16));
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+uint32 dstatus_count = 0;
+
+static int
+bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg)
+{
+	uint32 dstatus = sd->card_dstatus;
+	struct spierrstats_t *spierrstats = &sd->spierrstats;
+	int err = SUCCESS;
+
+	sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus));
+
+	/* Store dstatus of last few gSPI transactions */
+	spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus;
+	spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg;
+	dstatus_count++;
+
+	if (sd->card_init_done == FALSE)
+		return err;
+
+	if (dstatus & STATUS_DATA_NOT_AVAILABLE) {
+		spierrstats->dna++;
+		sd_trace(("Read data not available on F1 addr = 0x%x\n",
+		        GFIELD(cmd_arg, SPI_REG_ADDR)));
+		/* Clear dna bit */
+		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE);
+	}
+
+	if (dstatus & STATUS_UNDERFLOW) {
+		spierrstats->rdunderflow++;
+		sd_err(("FIFO underflow happened due to current F2 read command.\n"));
+	}
+
+	if (dstatus & STATUS_OVERFLOW) {
+		spierrstats->wroverflow++;
+		sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n"));
+		if ((sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 0)) {
+			bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW);
+			bcmspi_resync_f1(sd);
+			sd_err(("Recovering from F1 FIFO overflow.\n"));
+		} else {
+			err = ERROR_OF;
+		}
+	}
+
+	if (dstatus & STATUS_F2_INTR) {
+		spierrstats->f2interrupt++;
+		sd_trace(("Interrupt from F2.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_F3_INTR) {
+		spierrstats->f3interrupt++;
+		sd_err(("Interrupt from F3.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_HOST_CMD_DATA_ERR) {
+		spierrstats->hostcmddataerr++;
+		sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n"));
+	}
+
+	if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+		spierrstats->f2pktavailable++;
+		sd_trace(("Packet is available/ready in F2 TX FIFO\n"));
+		sd_trace(("Packet length = %d\n", sd->dwordmode ?
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+	}
+
+	if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+		spierrstats->f3pktavailable++;
+		sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+		sd_err(("Packet length = %d\n",
+		        (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+	}
+
+	return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+	return 0;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+	/* Default power on mode */
+	sd->sd_mode = SDIOH_MODE_SPI;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = 1;
+
+	return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+	uint32 regdata[2];
+	int status;
+
+	/* Find F1/F2/F3 max packet size */
+	if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+	                                 8, regdata)) != SUCCESS) {
+		return status;
+	}
+
+	sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+	        regdata[0], regdata[1]));
+
+	sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+	ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+	sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+	ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+	sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+	ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+	return 0;
+}
+
+static int
+bcmspi_client_init(sdioh_info_t *sd)
+{
+	uint32	status_en_reg = 0;
+	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+#ifdef HSMODE
+	if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#else
+	/* Start at ~400KHz clock rate for initialization */
+	if (!spi_start_clock(sd, 128)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	if (!bcmspi_host_device_init_adapt(sd)) {
+		sd_err(("bcmspi_host_device_init_adapt failed\n"));
+		return ERROR;
+	}
+
+	if (!bcmspi_test_card(sd)) {
+		sd_err(("bcmspi_test_card failed\n"));
+		return ERROR;
+	}
+
+	sd->num_funcs = SPI_MAX_IOFUNCS;
+
+	get_client_blocksize(sd);
+
+	/* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
+	bcmspi_resync_f1(sd);
+
+	sd->dwordmode = FALSE;
+
+	bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);
+
+	sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__));
+	status_en_reg |= INTR_WITH_STATUS;
+
+
+	if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
+	    status_en_reg & 0xff) != SUCCESS) {
+		sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__));
+		return ERROR;
+	}
+
+
+#ifndef HSMODE
+	/* After configuring for High-Speed mode, set the desired clock rate. */
+	if (!spi_start_clock(sd, 4)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	sd->card_init_done = TRUE;
+
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+	                                 4, &regdata)) != SUCCESS)
+		return status;
+
+	sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));
+
+
+	if (hsmode == TRUE) {
+		sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			sd_trace(("Device is already in High-Speed mode.\n"));
+			return status;
+		} else {
+			regdata |= HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS) {
+				return status;
+			}
+		}
+	} else {
+		sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			regdata &= ~HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS)
+				return status;
+		} else {
+			sd_trace(("Device is already in Low-Speed mode.\n"));
+			return status;
+		}
+	}
+
+	spi_controller_highspeed_mode(sd, hsmode);
+
+	return TRUE;
+}
+
+#define bcmspi_find_curr_mode(sd) { \
+	sd->wordlen = 2; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd->wordlen = 4; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd_trace(("Silicon testability issue: regdata = 0x%x." \
+	          " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata));	\
+	OSL_DELAY(100000); \
+}
+
+#define INIT_ADAPT_LOOP		100
+
+/* Adapt clock-phase-speed-bitwidth between host and device */
+static bool
+bcmspi_host_device_init_adapt(sdioh_info_t *sd)
+{
+	uint32 wrregdata, regdata = 0;
+	int status;
+	int i;
+
+	/* Due to a silicon testability issue, the first command from the Host
+	 * to the device will get corrupted (the first bit will be lost).  So the
+	 * Host should poll the device with a safe read request, i.e. the Host
+	 * should try to read F0 addr 0x14 using the fixed address mode.
+	 * (This prevents an unintended write command from being detected by the device.)
+	 */
+	for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+		/* If device was not power-cycled it will stay in 32bit mode with
+		 * response-delay-all bit set.  Alternate the settings across iterations
+		 * so that the F0 read succeeds either with or without response-delay.
+		 */
+		bcmspi_find_curr_mode(sd);
+		sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = TRUE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = FALSE;
+	}
+
+	/* Bail out, device not detected */
+	if (i == INIT_ADAPT_LOOP)
+		return FALSE;
+
+	/* Softreset the spid logic */
+	if ((sd->dwordmode) || (sd->wordlen == 4)) {
+		bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
+		bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
+		sd_trace(("reset reg read = 0x%x\n", regdata));
+		sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
+		       sd->wordlen, sd->resp_delay_all));
+		/* Restore default state after softreset */
+		sd->wordlen = 2;
+		sd->dwordmode = FALSE;
+	}
+
+	if (sd->wordlen == 4) {
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) !=
+		     SUCCESS)
+				return FALSE;
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n",
+			          regdata));
+			sd_trace(("Spid power was left on.\n"));
+		} else {
+			sd_err(("Spid power was left on but signature read failed."
+			        " Value read = 0x%x\n", regdata));
+			return FALSE;
+		}
+	} else {
+		sd->wordlen = 2;
+
+#define CTRL_REG_DEFAULT	0x00010430 /* according to the host m/c */
+
+		wrregdata = (CTRL_REG_DEFAULT);
+		sd->resp_delay_all = TRUE;
+		if (sd->resp_delay_all == TRUE) {
+			/* Enable response delay for all */
+			wrregdata |= (RESP_DELAY_ALL << 16);
+			/* Program response delay value */
+			wrregdata &= 0xffff00ff;
+			wrregdata |= (F1_RESPONSE_DELAY << 8);
+			sd->prev_fun = SPI_FUNC_1;
+			bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+		}
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+		sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata));
+
+#ifndef HSMODE
+		wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY);
+		wrregdata &= ~HIGH_SPEED_MODE;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+#endif /* HSMODE */
+
+		for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+			if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) {
+				sd_trace(("0xfeedbead was leftshifted by 1-bit.\n"));
+				if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4,
+				     &regdata)) != SUCCESS)
+					return FALSE;
+			}
+			OSL_DELAY(1000);
+		}
+
+
+		/* Change to host controller intr-polarity of active-low */
+		wrregdata &= ~INTR_POLARITY;
+		sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n",
+		        wrregdata));
+		/* Change to 32bit mode */
+		wrregdata |= WORD_LENGTH_32;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+
+		/* Change command/data packaging in 32bit LE mode */
+		sd->wordlen = 4;
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Read spid passed. Value read = 0x%x\n", regdata));
+			sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n"));
+		} else {
+			sd_err(("Stale spid reg values read as it was kept powered. Value read ="
+			  "0x%x\n", regdata));
+			return FALSE;
+		}
+	}
+
+
+	return TRUE;
+}
+
+static bool
+bcmspi_test_card(sdioh_info_t *sd)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+		return FALSE;
+
+	if (regdata == (TEST_RO_DATA_32BIT_LE))
+		sd_trace(("32bit LE regdata = 0x%x\n", regdata));
+	else {
+		sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata));
+		return FALSE;
+	}
+
+
+#define RW_PATTERN1	0xA0A1A2A3
+#define RW_PATTERN2	0x4B5B6B7B
+
+	regdata = RW_PATTERN1;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN1) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN1, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	regdata = RW_PATTERN2;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN2) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN2, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	return TRUE;
+}
+
+static int
+bcmspi_driver_init(sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((bcmspi_host_init(sd)) != SUCCESS) {
+		return ERROR;
+	}
+
+	if (bcmspi_client_init(sd) != SUCCESS) {
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
+/* Read device reg */
+static int
+bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func,
+	         regaddr, *data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize))
+	    != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);	/* Fixed access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize))
+	    != SUCCESS)
+		return status;
+
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func,
+	         regaddr, *data));
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	sd_trace(("dstatus =0x%x\n", dstatus));
+	return SUCCESS;
+}
+
+/* write a device register */
+static int
+bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 1, func,
+	         regaddr, data));
+
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize))
+	    != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+/* write a device register - 1 byte */
+static int
+bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, func,
+	         regaddr, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma,
+	                              cmd_arg, &data, 1)) != SUCCESS) {
+		return status;
+	}
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+void
+bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer)
+{
+	*dstatus_buffer = sd->card_dstatus;
+}
+
+/* 'data' is of type uint32 whereas other buffers are of type uint8 */
+static int
+bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                uint32 *data, uint32 datalen)
+{
+	uint32	i, j;
+	uint8	resp_delay = 0;
+	int	err = SUCCESS;
+	uint32	hostlen;
+	uint32 spilen = 0;
+	uint32 dstatus_idx = 0;
+	uint16 templen, buslen, len, *ptr = NULL;
+
+	sd_trace(("spi cmd = 0x%x\n", cmd_arg));
+
+	if (DWORDMODE_ON) {
+		spilen = GFIELD(cmd_arg, SPI_LEN);
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) ||
+		    (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1))
+			dstatus_idx = spilen * 3;
+
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+			spilen = spilen << 2;
+			dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0;
+			/* convert len to mod16 size */
+			spilen = ROUNDUP(spilen, 16);
+			cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+		}
+	}
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode (16/32bit) the device is in.
+	 */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf = bcmswap32(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint16 *)spi_outbuf = bcmswap16(cmd_arg & 0xffff);
+		*(uint16 *)&spi_outbuf[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16);
+		if (datalen & 0x1)
+			datalen++;
+		if (datalen < 4)
+			datalen = ROUNDUP(datalen, 4);
+	} else {
+		sd_err(("Host is %d bit spid, could not create SPI command.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer */
+	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) {
+		/* The len field of the hw-header is always sent as a mod16 size, both from host and dongle */
+		if (DWORDMODE_ON) {
+			if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) {
+				ptr = (uint16 *)&data[0];
+				templen = *ptr;
+				/* ASSERT(*ptr == ~*(ptr + 1)); */
+				templen = ROUNDUP(templen, 16);
+				*ptr = templen;
+				sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1))));
+			}
+		}
+
+		if (datalen != 0) {
+			for (i = 0; i < datalen/4; i++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					*(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+						bcmswap32(data[i]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					*(uint16 *)&spi_outbuf[i * 4 + CMDLEN] =
+						bcmswap16(data[i] & 0xffff);
+					*(uint16 *)&spi_outbuf[i * 4 + CMDLEN + 2] =
+						bcmswap16((data[i] & 0xffff0000) >> 16);
+				}
+			}
+		}
+	}
+
+	/* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */
+	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) {
+		int func = GFIELD(cmd_arg, SPI_FUNCTION);
+		switch (func) {
+			case 0:
+				resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0;
+				break;
+			case 1:
+				resp_delay = F1_RESPONSE_DELAY;
+				break;
+			case 2:
+				resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0;
+				break;
+			default:
+				ASSERT(0);
+				break;
+		}
+		/* Program response delay */
+		bcmspi_prog_resp_delay(sd, func, resp_delay);
+	}
+
+	/* +4 for cmd and +4 for dstatus */
+	hostlen = datalen + 8 + resp_delay;
+	hostlen += dstatus_idx;
+	hostlen += (4 - (hostlen & 0x3));
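+	/* e.g. a 4-byte register read with no response delay and no dword-mode
+	 * padding clocks out 4(cmd) + 4(data) + 4(dstatus) = 12 bytes, padded to 16.
+	 */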
+	spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);
+
+	/* for Read, get the data into the input buffer */
+	if (datalen != 0) {
+		if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */
+			for (j = 0; j < datalen/4; j++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					data[j] = bcmswap32(*(uint32 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					data[j] = (bcmswap16(*(uint16 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay])) |
+					         ((bcmswap16(*(uint16 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay + 2])) << 16);
+				}
+			}
+
+			if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+				ptr = (uint16 *)&data[0];
+				templen = *ptr;
+				buslen = len = ~(*(ptr + 1));
+				buslen = ROUNDUP(buslen, 16);
+				/* populate actual len in hw-header */
+				if (templen == buslen)
+					*ptr = len;
+			}
+		}
+	}
+
+	/* Restore the len field of the hw header */
+	if (DWORDMODE_ON) {
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+			ptr = (uint16 *)&data[0];
+			*ptr = (uint16)(~*(ptr+1));
+		}
+	}
+
+	dstatus_idx += (datalen + CMDLEN + resp_delay);
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf[dstatus_idx]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx + 2]) << 16));
+	} else {
+		sd_err(("Host is %d bit machine, could not read SPI dstatus.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+	if (sd->card_dstatus == 0xffffffff) {
+		sd_err(("looks like not a GSPI device or device is not powered.\n"));
+	}
+
+	err = bcmspi_update_stats(sd, cmd_arg);
+
+	return err;
+
+}
+
+static int
+bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                uint32 addr, int nbytes, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	bool write = rw == SDIOH_READ ? 0 : 1;
+	uint retries = 0;
+
+	bool enable;
+	uint32	spilen;
+
+	cmd_arg = 0;
+
+	ASSERT(nbytes);
+	ASSERT(nbytes <= sd->client_block_size[func]);
+
+	if (write) sd->t_cnt++; else sd->r_cnt++;
+
+	if (func == 2) {
+		/* Frame len check limited by gSPI. */
+		if ((nbytes > 2000) && write) {
+			sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes));
+		}
+		/* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */
+		/* If F2 fifo on device is not ready to receive data, don't do F2 transfer */
+		if (write) {
+			uint32 dstatus;
+			/* check F2 ready with cached one */
+			bcmspi_cmd_getdstatus(sd, &dstatus);
+			if ((dstatus & STATUS_F2_RX_READY) == 0) {
+				retries = WAIT_F2RXFIFORDY;
+				enable = 0;
+				while (retries-- && !enable) {
+					OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000);
+					bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4,
+					                   &dstatus);
+					if (dstatus & STATUS_F2_RX_READY)
+						enable = TRUE;
+				}
+				if (!enable) {
+					struct spierrstats_t *spierrstats = &sd->spierrstats;
+					spierrstats->f2rxnotready++;
+					sd_err(("F2 FIFO is not ready to receive data.\n"));
+					return ERROR;
+				}
+				sd_trace(("No of retries on F2 ready %d\n",
+					(WAIT_F2RXFIFORDY - retries)));
+			}
+		}
+	}
+
+	/* F2 transfers happen on 0 addr */
+	addr = (func == 2) ? 0 : addr;
+
+	/* In PIO mode the buffer is read using the fixed-address FIFO on func 1 */
+	if ((func == 1) && (fifo))
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);
+	else
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);
+
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write);
+	spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes);
+	if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+		/* convert len to mod4 size */
+		spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0);
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+	} else
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen);
+
+	if ((func == 2) && (fifo == 1)) {
+		sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+		          __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+		          addr, nbytes, sd->r_cnt, sd->t_cnt));
+	}
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+	         __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+	         addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg,
+	     data, nbytes)) != SUCCESS) {
+		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
+			(write ? "write" : "read")));
+		return status;
+	}
+
+	/* gSPI expects that hw-header-len is equal to spi-command-len */
+	if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
+	}
+
+	if ((nbytes > 2000) && !write) {
+		sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
+	}
+
+	return SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+	si->card_init_done = FALSE;
+	return bcmspi_client_init(si);
+}
diff --git a/drivers/net/wireless/bcm4329/bcmutils.c b/drivers/net/wireless/bcm4329/bcmutils.c
new file mode 100644
index 0000000..43c04ee
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmutils.c
@@ -0,0 +1,1838 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c,v 1.210.4.5.2.4.6.19 2010/04/26 06:05:25 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#include <bcmutils.h>
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <siutils.h>
+#else
+#include <stdio.h>
+#include <string.h>
+/* This case for external supplicant use */
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+#endif /* BCMDRIVER */
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+
+#ifdef BCMDRIVER
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	if (len < 0)
+		len = 4096;	/* "infinite" */
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(PKTDATA(osh, p) + offset, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(buf, PKTDATA(osh, p) + offset, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint
+pkttotlen(osl_t *osh, void *p)
+{
+	uint total;
+
+	total = 0;
+	for (; p; p = PKTNEXT(osh, p))
+		total += PKTLEN(osh, p);
+	return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+	for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+		;
+
+	return (p);
+}
+
+/* count segments of a chained packet */
+uint
+pktsegcnt(osl_t *osh, void *p)
+{
+	uint cnt;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p))
+		cnt++;
+
+	return cnt;
+}
+
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void *
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, p);
+	else
+		q->head = p;
+
+	q->tail = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+void *
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head == NULL)
+		q->tail = p;
+
+	PKTSETLINK(p, q->head);
+	q->head = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+void *
+pktq_pdeq(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void *
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	q = &pq->q[prec];
+	p = q->head;
+	while (p) {
+		q->head = PKTLINK(p);
+		PKTSETLINK(p, NULL);
+		PKTFREE(osh, p, dir);
+		q->len--;
+		pq->len--;
+		p = q->head;
+	}
+	ASSERT(q->len == 0);
+	q->tail = NULL;
+}
+
+bool
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	if (!pktbuf)
+		return FALSE;
+
+	q = &pq->q[prec];
+
+	if (q->head == pktbuf) {
+		if ((q->head = PKTLINK(pktbuf)) == NULL)
+			q->tail = NULL;
+	} else {
+		for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+			;
+		if (p == NULL)
+			return FALSE;
+
+		PKTSETLINK(p, PKTLINK(pktbuf));
+		if (q->tail == pktbuf)
+			q->tail = p;
+	}
+
+	q->len--;
+	pq->len--;
+	PKTSETLINK(pktbuf, NULL);
+	return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+	int prec;
+
+	ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+	/* pq is variable size; only zero out what's requested */
+	bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+	pq->num_prec = (uint16)num_prec;
+
+	pq->max = (uint16)max_len;
+
+	for (prec = 0; prec < num_prec; prec++)
+		pq->q[prec].max = pq->max;
+}
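+
+/* A minimal usage sketch of the precedence queue above (queue depth and
+ * precedence values are illustrative; pkt is a packet obtained from the osl):
+ *
+ *	struct pktq txq;
+ *	int prec_out;
+ *	void *p;
+ *
+ *	pktq_init(&txq, PKTQ_MAX_PREC, 128);
+ *	pktq_penq(&txq, 3, pkt);
+ *	p = pktq_deq(&txq, &prec_out);
+ */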
+
+void *
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void *
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
+{
+	int prec;
+	for (prec = 0; prec < pq->num_prec; prec++)
+		pktq_pflush(osh, pq, prec, dir);
+	ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+	int prec, len;
+
+	len = 0;
+
+	for (prec = 0; prec <= pq->hi_prec; prec++)
+		if (prec_bmp & (1 << prec))
+			len += pq->q[prec].len;
+
+	return len;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void *
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+#endif /* BCMDRIVER */
+
+
+
+const unsigned char bcm_ctype[] = {
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
+	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+	_BCM_C,	/* 8-15 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,		/* 32-39 */
+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
+	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
+	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
+	_BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+	_BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
+	_BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+	_BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 128-143 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 144-159 */
+	_BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 160-175 */
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 176-191 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,	/* 192-207 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L,	/* 208-223 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,	/* 224-239 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(char *cp, char **endp, uint base)
+{
+	ulong result, last_result = 0, value;
+	bool minus;
+
+	minus = FALSE;
+
+	while (bcm_isspace(*cp))
+		cp++;
+
+	if (cp[0] == '+')
+		cp++;
+	else if (cp[0] == '-') {
+		minus = TRUE;
+		cp++;
+	}
+
+	if (base == 0) {
+		if (cp[0] == '0') {
+			if ((cp[1] == 'x') || (cp[1] == 'X')) {
+				base = 16;
+				cp = &cp[2];
+			} else {
+				base = 8;
+				cp = &cp[1];
+			}
+		} else
+			base = 10;
+	} else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+		cp = &cp[2];
+	}
+
+	result = 0;
+
+	while (bcm_isxdigit(*cp) &&
+	       (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus)
+			return (ulong)-1;
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = (char *)cp;
+
+	return (result);
+}
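+
+/* Usage sketch: with base 0 the prefix selects the radix, per the code above:
+ *
+ *	bcm_strtoul("0x1a", NULL, 0)	returns 26
+ *	bcm_strtoul("017", NULL, 0)	returns 15
+ *	bcm_strtoul("-5", NULL, 10)	returns (ulong)-5
+ */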
+
+int
+bcm_atoi(char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char*
+bcmstrstr(char *haystack, char *needle)
+{
+	int len, nlen;
+	int i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return (haystack);
+
+	nlen = strlen(needle);
+	len = strlen(haystack) - nlen + 1;
+
+	for (i = 0; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return (&haystack[i]);
+	return (NULL);
+}
+
+char*
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char*
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+*  but allows it to be used on different strings or by different callers at the
+*  same time. Each call modifies '*string' by substituting a null character for the
+*  first delimiter that is encountered, and updates 'string' to point to the char
+*  after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+	 */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
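+
+/* Usage sketch (buffer contents are illustrative): pull comma/space separated
+ * tokens out of a modifiable buffer until none remain:
+ *
+ *	char buf[] = "foo, bar baz";
+ *	char *cursor = buf;
+ *	char *tok;
+ *
+ *	while ((tok = bcmstrtok(&cursor, ", ", NULL)) != NULL)
+ *		printf("%s\n", tok);
+ */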
+
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare two strings case-insensitively, up to a max of 'cnt'
+*             characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(char *p, struct ether_addr *ea)
+{
+	int i = 0;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &p, 16);
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+	ulong copyct = 1;
+	ushort i;
+
+	if (abuflen == 0)
+		return 0;
+
+	/* wbuflen is in bytes */
+	wbuflen /= sizeof(ushort);
+
+	for (i = 0; i < wbuflen; ++i) {
+		if (--abuflen == 0)
+			break;
+		*abuf++ = (char) *wbuf++;
+		++copyct;
+	}
+	*abuf = '\0';
+
+	return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+	static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x";
+	snprintf(buf, 18, template,
+		ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff,
+		ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff);
+	return (buf);
+}
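+
+/* Usage sketch: round-trip a MAC address through the two helpers above
+ * (the address is illustrative):
+ *
+ *	char mac[] = "00:90:4c:c5:12:38";
+ *	struct ether_addr ea;
+ *	char eabuf[18];
+ *
+ *	if (bcm_ether_atoe(mac, &ea))
+ *		printf("%s\n", bcm_ether_ntoa(&ea, eabuf));
+ */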
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+	snprintf(buf, 16, "%d.%d.%d.%d",
+	         ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+	return (buf);
+}
+
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+	uint i;
+
+	for (i = 0; i < ms; i++) {
+		OSL_DELAY(1000);
+	}
+}
+
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+	void *p;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	for (p = p0; p; p = PKTNEXT(osh, p))
+		prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif /* DHD_DEBUG */
+
+/* Takes an Ethernet frame and sets out-of-band PKTPRIO.
+ * Also updates the inplace vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint
+pktsetprio(void *pkt, bool update_vtag)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata;
+	int priority = 0;
+	int rc = 0;
+
+	pktdata = (uint8 *) PKTDATA(NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+	eh = (struct ether_header *) pktdata;
+
+	if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) {
+		uint16 vlan_tag;
+		int vlan_prio, dscp_prio = 0;
+
+		evh = (struct ethervlan_header *)eh;
+
+		vlan_tag = ntoh16(evh->vlan_tag);
+		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+		if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) {
+			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+			uint8 tos_tc = IP_TOS(ip_body);
+			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		}
+
+		/* DSCP priority gets precedence over 802.1P (vlan tag) */
+		if (dscp_prio != 0) {
+			priority = dscp_prio;
+			rc |= PKTPRIO_VDSCP;
+		} else {
+			priority = vlan_prio;
+			rc |= PKTPRIO_VLAN;
+		}
+		/*
+		 * If the DSCP priority is not the same as the VLAN priority,
+		 * then overwrite the priority field in the vlan tag with the
+		 * DSCP priority value. This is required for Linux APs because
+		 * the VLAN driver on Linux overwrites the skb->priority field
+		 * with the priority value in the vlan tag.
+		 */
+		if (update_vtag && (priority != vlan_prio)) {
+			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+			evh->vlan_tag = hton16(vlan_tag);
+			rc |= PKTPRIO_UPD;
+		}
+	} else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+		uint8 *ip_body = pktdata + sizeof(struct ether_header);
+		uint8 tos_tc = IP_TOS(ip_body);
+		priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		rc |= PKTPRIO_DSCP;
+	}
+
+	ASSERT(priority >= 0 && priority <= MAXPRIO);
+	PKTSETPRIO(pkt, priority);
+	return (rc | priority);
+}
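+
+/* Usage sketch: classify a frame before queueing it by precedence.  The
+ * priority is stored on the packet via PKTSETPRIO(); the return value is only
+ * a debug indication of which header it came from.  Assuming an osl PKTPRIO()
+ * accessor paired with PKTSETPRIO(), and txq as in the pktq sketch above:
+ *
+ *	pktsetprio(pkt, TRUE);
+ *	pktq_penq(&txq, PKTPRIO(pkt), pkt);
+ */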
+
+static char bcm_undeferrstr[BCME_STRLEN];
+
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings  */
+const char *
+bcmerrorstr(int bcmerror)
+{
+	/* check if someone added a bcmerror code but forgot to add errorstring */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+	if (bcmerror > 0 || bcmerror < BCME_LAST) {
+		snprintf(bcm_undeferrstr, BCME_STRLEN, "Undefined error %d", bcmerror);
+		return bcm_undeferrstr;
+	}
+
+	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+	return bcmerrorstrtable[-bcmerror];
+}
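+
+/* Usage sketch: turn a BCME_* code into a readable message in an error path
+ * (the message text is illustrative):
+ *
+ *	printf("iovar failed: %s\n", bcmerrorstr(BCME_BUFTOOSHORT));
+ */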
+
+
+
+/* iovar table lookup */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+	const bcm_iovar_t *vi;
+	const char *lookup_name;
+
+	/* skip any ':' delimited option prefixes */
+	lookup_name = strrchr(name, ':');
+	if (lookup_name != NULL)
+		lookup_name++;
+	else
+		lookup_name = name;
+
+	ASSERT(table != NULL);
+
+	for (vi = table; vi->name; vi++) {
+		if (!strcmp(vi->name, lookup_name))
+			return vi;
+	}
+	/* ran to end of table */
+
+	return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+
+	/* length check on io buf */
+	switch (vi->type) {
+	case IOVT_BOOL:
+	case IOVT_INT8:
+	case IOVT_INT16:
+	case IOVT_INT32:
+	case IOVT_UINT8:
+	case IOVT_UINT16:
+	case IOVT_UINT32:
+		/* all integers are int32 sized args at the ioctl interface */
+		if (len < (int)sizeof(int)) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_BUFFER:
+		/* buffer must meet minimum length requirement */
+		if (len < vi->minlen) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_VOID:
+		if (!set) {
+			/* Cannot return nil... */
+			bcmerror = BCME_UNSUPPORTED;
+		} else if (len) {
+			/* Set is an action w/o parameters */
+			bcmerror = BCME_BUFTOOLONG;
+		}
+		break;
+
+	default:
+		/* unknown type for length check in iovar info */
+		ASSERT(0);
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
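+
+/* Usage sketch of the two iovar helpers above, inside a hypothetical handler
+ * with (name, arg, len, set) parameters; the table name is illustrative.  Look
+ * up the iovar by name (any "prefix:" portion is skipped), then validate the
+ * buffer length before acting:
+ *
+ *	const bcm_iovar_t *vi;
+ *	int err;
+ *
+ *	if ((vi = bcm_iovar_lookup(my_iovar_table, name)) == NULL)
+ *		return BCME_UNSUPPORTED;
+ *	if ((err = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ *		return err;
+ */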
+
+#endif	/* BCMDRIVER */
+
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ *       x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+STATIC const uint8 crc8_table[256] = {
+    0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+    0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+    0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+    0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+    0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+    0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+    0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+    0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+    0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+    0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+    0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+    0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+    0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+    0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+    0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+    0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+    0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+    0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+    0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+    0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+    0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+    0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+    0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+    0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+    0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+    0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+    0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+    0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+    0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+    0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+    0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+    0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+	(c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+	uint8 *pdata,	/* pointer to array of data to process */
+	uint  nbytes,	/* number of input data bytes to process */
+	uint8 crc	/* either CRC8_INIT_VALUE or previous return value */
+)
+{
+	/* hard code the crc loop instead of using CRC_INNER_LOOP macro
+	 * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+	 */
+	while (nbytes-- > 0)
+		crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
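+
+/* Usage sketch, per the notes above: the sender complements the final CRC and
+ * appends it; the receiver runs the CRC over data plus the appended byte and
+ * compares against the magic residue (buffer names are illustrative):
+ *
+ *	uint8 crc = hndcrc8(buf, len, CRC8_INIT_VALUE);
+ *	buf[len] = ~crc;
+ *
+ *	if (hndcrc8(buf, len + 1, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE)
+ *		... frame is good ...
+ */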
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ *       x^16 + x^12 + x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+    0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+    0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+    0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+    0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+    0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+    0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+    0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+    0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+    0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+    0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+    0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+    0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+    0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+    0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+    0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+    0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+    0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+    0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+    0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+    0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+    0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+    0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+    0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+    0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+    0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+    0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+    0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+    0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+    0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+    0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+    0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+    0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+    uint8 *pdata,  /* pointer to array of data to process */
+    uint nbytes, /* number of input data bytes to process */
+    uint16 crc     /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+	while (nbytes-- > 0)
+		CRC_INNER_LOOP(16, crc, *pdata++);
+	return crc;
+}
+
+STATIC const uint32 crc32_table[256] = {
+    0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+    0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+    0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+    0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+    0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+    0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+    0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+    0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+    0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+    0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+    0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+    0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+    0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+    0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+    0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+    0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+    0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+    0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+    0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+    0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+    0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+    0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+    0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+    0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+    0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+    0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+    0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+    0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+    0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+    0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+    0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+    0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+    0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+    0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+    0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+    0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+    0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+    0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+    0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+    0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+    0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+    0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+    0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+    0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+    0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+    0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+    0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+    0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+    0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+    0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+    0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+    0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+    0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+    0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+    0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+    0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+    0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+    0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+    0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+    0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+    0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+    0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+    0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+    0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+uint32
+hndcrc32(
+    uint8 *pdata,  /* pointer to array of data to process */
+    uint   nbytes, /* number of input data bytes to process */
+    uint32 crc     /* either CRC32_INIT_VALUE or previous return value */
+)
+{
+	uint8 *pend;
+#ifdef __mips__
+	uint8 tmp[4];
+	ulong *tptr = (ulong *)tmp;
+
+	/* in case the beginning of the buffer isn't aligned */
+	pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc);
+	nbytes -= (pend - pdata);
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+
+	/* handle bulk of data as 32-bit words */
+	pend = pdata + (nbytes & 0xfffffffc);
+	while (pdata < pend) {
+		*tptr = *(ulong *)pdata;
+		pdata += sizeof(ulong *);
+		CRC_INNER_LOOP(32, crc, tmp[0]);
+		CRC_INNER_LOOP(32, crc, tmp[1]);
+		CRC_INNER_LOOP(32, crc, tmp[2]);
+		CRC_INNER_LOOP(32, crc, tmp[3]);
+	}
+
+	/* 1-3 bytes at end of buffer */
+	pend = pdata + (nbytes & 0x03);
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+#else
+	pend = pdata + nbytes;
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+#endif /* __mips__ */
+
+	return crc;
+}
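+
+/* Note added for clarity (not in the original source): crc32_table above is
+ * the standard reflected CRC-32 table (polynomial 0x04C11DB7, reflected form
+ * 0xEDB88320), so, assuming CRC32_INIT_VALUE is the conventional 0xffffffff,
+ * the familiar zlib/IEEE 802.3 style checksum is obtained by complementing
+ * the result:
+ *
+ *	crc = ~hndcrc32(data, datalen, CRC32_INIT_VALUE);
+ */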
+
+#ifdef notdef
+#define CLEN 	1499 	/*  CRC Length */
+#define CBUFSIZ 	(CLEN+4)
+#define CNBUFS		5 /* # of bufs */
+
+void testcrc32(void)
+{
+	uint j, k, l;
+	uint8 *buf;
+	uint len[CNBUFS];
+	uint32 crcr;
+	uint32 crc32tv[CNBUFS] =
+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+	/* step through all possible alignments */
+	for (l = 0; l <= 4; l++) {
+		for (j = 0; j < CNBUFS; j++) {
+			len[j] = CLEN;
+			for (k = 0; k < len[j]; k++)
+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+		}
+
+		for (j = 0; j < CNBUFS; j++) {
+			crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+			ASSERT(crcr == crc32tv[j]);
+		}
+	}
+
+	MFREE(buf, CBUFSIZ*CNBUFS);
+	return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * If the current element is invalid, *buflen is left unmodified; otherwise it
+ * is decremented by the current element's total length (2 + elt->len).
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+	int len;
+
+	/* validate current elt */
+	if (!bcm_valid_tlv(elt, *buflen))
+		return NULL;
+
+	/* advance to next elt */
+	len = elt->len;
+	elt = (bcm_tlv_t*)(elt->data + len);
+	*buflen -= (2 + len);
+
+	/* validate next elt */
+	if (!bcm_valid_tlv(elt, *buflen))
+		return NULL;
+
+	return elt;
+}
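+
+/* Typical walk over a TLV buffer using the helper above (illustrative sketch
+ * added for clarity, not part of the original source):
+ *
+ *	bcm_tlv_t *elt = (bcm_tlv_t *)buf;
+ *	int buflen = len;
+ *	while (elt != NULL && bcm_valid_tlv(elt, buflen)) {
+ *		... use elt->id, elt->len and elt->data ...
+ *		elt = bcm_next_tlv(elt, &buflen);
+ *	}
+ */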
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= 2) {
+		int len = elt->len;
+
+		/* validate remaining totlen */
+		if ((elt->id == key) && (totlen >= (len + 2)))
+			return (elt);
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+		totlen -= (len + 2);
+	}
+
+	return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.  Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= 2) {
+		uint id = elt->id;
+		int len = elt->len;
+
+		/* Punt if we start seeing IDs > than target key */
+		if (id > key)
+			return (NULL);
+
+		/* validate remaining totlen */
+		if ((id == key) && (totlen >= (len + 2)))
+			return (elt);
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+		totlen -= (len + 2);
+	}
+	return NULL;
+}
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+	defined(DHD_DEBUG)
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+	int i;
+	char* p = buf;
+	char hexstr[16];
+	int slen = 0;
+	uint32 bit;
+	const char* name;
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+	len -= 1;
+
+	for (i = 0; flags != 0; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (bit == 0 && flags) {
+			/* print any unnamed bits */
+			sprintf(hexstr, "0x%X", flags);
+			name = hexstr;
+			flags = 0;	/* exit loop */
+		} else if ((flags & bit) == 0)
+			continue;
+		slen += strlen(name);
+		if (len < slen)
+			break;
+		if (p != buf) p += sprintf(p, " "); /* btwn flag space */
+		strcat(p, name);
+		p += strlen(name);
+		flags &= ~bit;
+		len -= slen;
+		slen = 1;	/* account for btwn flag space */
+	}
+
+	/* indicate the str was too short */
+	if (flags != 0) {
+		if (len == 0)
+			p--;	/* overwrite last char */
+		p += sprintf(p, ">");
+	}
+
+	return (int)(p - buf);
+}
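+
+/* Illustrative use (hypothetical descriptor table, added for clarity): the
+ * bit-descriptor array is scanned until a terminating entry with .bit == 0,
+ * which also formats any remaining unnamed bits as hex:
+ *
+ *	static const bcm_bit_desc_t if_flags[] = {
+ *		{0x01, "UP"}, {0x02, "PROMISC"}, {0, NULL}
+ *	};
+ *	bcm_format_flags(if_flags, 0x07, buf, sizeof(buf));
+ *	// buf now contains "UP PROMISC 0x4"
+ */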
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+	int i;
+	char *p = str;
+	const uint8 *src = (const uint8*)bytes;
+
+	for (i = 0; i < len; i++) {
+		p += sprintf(p, "%02X", *src);
+		src++;
+	}
+	return (int)(p - str);
+}
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+	char line[128], *p;
+	uint i;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	p = line;
+	for (i = 0; i < nbytes; i++) {
+		if (i % 16 == 0) {
+			p += sprintf(p, "  %04d: ", i);	/* line prefix */
+		}
+		p += sprintf(p, "%02x ", buf[i]);
+		if (i % 16 == 15) {
+			printf("%s\n", line);		/* flush line */
+			p = line;
+		}
+	}
+
+	/* flush last partial line */
+	if (p != line)
+		printf("%s\n", line);
+}
+#endif /* WLMSG_PRHDRS || WLMSG_PRPKT || WLMSG_ASSOC || DHD_DEBUG */
+
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+	if (brev < 0x100)
+		snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+	else
+		snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+	return (buf);
+}
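+
+/* Examples of the format produced above (note added for clarity): a boardrev
+ * of 0x23 is rendered as "2.3"; for values >= 0x100 the top nibble selects
+ * the prefix, so 0x1203 becomes "P203" and 0x0203 becomes "A203".
+ */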
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+	uint len, max_len;
+	char c;
+
+	len = strlen(buf);
+
+	max_len = BUFSIZE_TODUMP_ATONCE;
+
+	while (len > max_len) {
+		c = buf[max_len];
+		buf[max_len] = '\0';
+		printf("%s", buf);
+		buf[max_len] = c;
+
+		buf += max_len;
+		len -= max_len;
+	}
+	/* print the remaining string */
+	printf("%s\n", buf);
+	return;
+}
+
+/* Routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+	char *buf, uint32 bufsize)
+{
+	uint  filled_len;
+	int len;
+	struct fielddesc *cur_ptr;
+
+	filled_len = 0;
+	cur_ptr = fielddesc_array;
+
+	while (bufsize > 1) {
+		if (cur_ptr->nameandfmt == NULL)
+			break;
+		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+		               read_rtn(arg0, arg1, cur_ptr->offset));
+		/* check for snprintf overflow or error */
+		if (len < 0 || (uint32)len >= bufsize)
+			len = bufsize - 1;
+		buf += len;
+		bufsize -= len;
+		filled_len += len;
+		cur_ptr++;
+	}
+	return filled_len;
+}
+
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+	uint len;
+
+	len = strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	strncpy(buf, name, buflen);
+
+	/* append data onto the end of the name string */
+	memcpy(&buf[len], data, datalen);
+	len += datalen;
+
+	return len;
+}
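+
+/* Illustrative use (sketch added for clarity; "roam_off" is only an example
+ * name): bcm_mkiovar() packs a NUL-terminated iovar name followed by its
+ * value into a single buffer suitable for a WLC_SET_VAR/WLC_GET_VAR request,
+ * returning the total length, or 0 if the buffer is too small:
+ *
+ *	int val = htol32(1);
+ *	uint iolen = bcm_mkiovar("roam_off", (char *)&val, sizeof(val),
+ *	                         iobuf, sizeof(iobuf));
+ *	// iobuf now holds "roam_off\0" followed by the 4 value bytes; iolen == 13
+ */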
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153		/* Offset for first entry */
+#define QDBM_TABLE_LEN 40	/* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: 	+0 	+1 	+2 	+3 	+4 	+5 	+6 	+7 */
+/* 153: */      6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
+/* 161: */      10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
+/* 169: */      16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
+/* 177: */      26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
+/* 185: */      42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+	uint factor = 1;
+	int idx = qdbm - QDBM_OFFSET;
+
+	if (idx >= QDBM_TABLE_LEN) {
+		/* clamp to max uint16 mW value */
+		return 0xFFFF;
+	}
+
+	/* scale the qdBm index up to the range of the table 0-40
+	 * where an offset of 40 qdBm equals a factor of 10 mW.
+	 */
+	while (idx < 0) {
+		idx += 40;
+		factor *= 10;
+	}
+
+	/* return the mW value scaled down to the correct factor of 10,
+	 * adding in factor/2 to get proper rounding.
+	 */
+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
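+
+/* Worked example (added for clarity): qdbm = 68 (17 dBm) gives
+ * idx = 68 - 153 = -85; three passes of the loop leave idx = 35 and
+ * factor = 1000, so the result is (50119 + 500) / 1000 = 50 mW, matching
+ * 10^(17/10) ~= 50.1 mW.
+ */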
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+	uint8 qdbm;
+	int offset;
+	uint mw_uint = mw;
+	uint boundary;
+
+	/* handle boundary case */
+	if (mw_uint <= 1)
+		return 0;
+
+	offset = QDBM_OFFSET;
+
+	/* move mw into the range of the table */
+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+		mw_uint *= 10;
+		offset -= 40;
+	}
+
+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+		                                    nqdBm_to_mW_map[qdbm])/2;
+		if (mw_uint < boundary) break;
+	}
+
+	qdbm += (uint8)offset;
+
+	return (qdbm);
+}
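+
+/* Worked example (added for clarity): mw = 50 is below QDBM_TABLE_LOW_BOUND,
+ * so it is scaled up to 50000 while offset drops from 153 to 33; the table
+ * search stops at index 35 (entry 50119), giving 35 + 33 = 68 qdBm, i.e.
+ * 17 dBm, the inverse of the bcm_qdbm_to_mw() example above.
+ */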
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+	uint bitcount = 0, i;
+	uint8 tmp;
+	for (i = 0; i < length; i++) {
+		tmp = bitmap[i];
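+		/* tmp &= (tmp - 1) clears the lowest set bit on each pass
+		 * (Kernighan's population-count method), so the inner loop
+		 * iterates once per set bit. (Comment added for clarity.)
+		 */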
+		while (tmp) {
+			bitcount++;
+			tmp &= (tmp - 1);
+		}
+	}
+	return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+	b->origsize = b->size = size;
+	b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* Handle all vsnprintf variants: non-ANSI-C99-compliant implementations
+	 * return -1 on truncation, ANSI-compliant ones return r >= b->size,
+	 * and bcmstdlib returns 0.
+	 */
+	if ((r == -1) || (r >= (int)b->size) || (r == 0)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+	va_end(ap);
+
+	return r;
+}
+
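+/* Descriptive note (added; not in the original source): bcm_inc_bytes()
+ * below treats 'num' as a little-endian multi-byte counter (byte 0 is least
+ * significant) and adds 'amount' to it, propagating the carry, while
+ * bcm_cmp_bytes() compares two such counters starting from the most
+ * significant byte.
+ */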
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(char *name, const uchar *data, int len)
+{
+	int i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+
+/*
+ * buffer length needed for wlc_format_ssid
+ * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
+ */
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif /* WLTINYDUMP || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT || WLMSG_WSEC */
+
+#endif /* BCMDRIVER */
diff --git a/drivers/net/wireless/bcm4329/bcmwifi.c b/drivers/net/wireless/bcm4329/bcmwifi.c
new file mode 100644
index 0000000..803acf8
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmwifi.c
@@ -0,0 +1,199 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi.c,v 1.18.24.2.4.1 2009/09/25 00:32:01 Exp $
+ */
+
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#endif 
+#include <bcmwifi.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> 	
+#endif
+
+
+
+
+
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+	const char *band, *bw, *sb;
+	uint channel;
+
+	band = "";
+	bw = "";
+	sb = "";
+	channel = CHSPEC_CHANNEL(chspec);
+	
+	if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
+	    (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
+		band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
+	if (CHSPEC_IS40(chspec)) {
+		if (CHSPEC_SB_UPPER(chspec)) {
+			sb = "u";
+			channel += CH_10MHZ_APART;
+		} else {
+			sb = "l";
+			channel -= CH_10MHZ_APART;
+		}
+	} else if (CHSPEC_IS10(chspec)) {
+		bw = "n";
+	}
+
+	
+	snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
+	return (buf);
+}
+
+
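+/* Descriptive note (added; not in the original source): wf_chspec_aton()
+ * parses the string format produced by wf_chspec_ntoa() above, i.e.
+ * "<channel>[a|b][n|l|u]", where an optional 'a'/'b' forces the
+ * 5 GHz/2.4 GHz band, 'n' selects 10 MHz bandwidth, and 'l'/'u' select the
+ * lower/upper control sideband of a 40 MHz channel. 0 is returned on any
+ * parse error.
+ */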
+chanspec_t
+wf_chspec_aton(char *a)
+{
+	char *endp = NULL;
+	uint channel, band, bw, ctl_sb;
+	char c;
+
+	channel = strtoul(a, &endp, 10);
+
+	
+	if (endp == a)
+		return 0;
+
+	if (channel > MAXCHANNEL)
+		return 0;
+
+	band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+	bw = WL_CHANSPEC_BW_20;
+	ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+
+	a = endp;
+
+	c = tolower(a[0]);
+	if (c == '\0')
+		goto done;
+
+	
+	if (c == 'a' || c == 'b') {
+		band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
+		a++;
+		c = tolower(a[0]);
+		if (c == '\0')
+			goto done;
+	}
+
+	
+	if (c == 'n') {
+		bw = WL_CHANSPEC_BW_10;
+	} else if (c == 'l') {
+		bw = WL_CHANSPEC_BW_40;
+		ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
+		
+		if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
+			channel += CH_10MHZ_APART;
+		else
+			return 0;
+	} else if (c == 'u') {
+		bw = WL_CHANSPEC_BW_40;
+		ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
+		
+		if (channel > CH_20MHZ_APART)
+			channel -= CH_10MHZ_APART;
+		else
+			return 0;
+	} else {
+		return 0;
+	}
+
+done:
+	return (channel | band | bw | ctl_sb);
+}
+
+
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+	int ch = -1;
+	uint base;
+	int offset;
+
+	
+	if (start_factor == 0) {
+		if (freq >= 2400 && freq <= 2500)
+			start_factor = WF_CHAN_FACTOR_2_4_G;
+		else if (freq >= 5000 && freq <= 6000)
+			start_factor = WF_CHAN_FACTOR_5_G;
+	}
+
+	if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+		return 14;
+
+	base = start_factor / 2;
+
+	
+	if ((freq < base) || (freq > base + 1000))
+		return -1;
+
+	offset = freq - base;
+	ch = offset / 5;
+
+	
+	if (offset != (ch * 5))
+		return -1;
+
+	
+	if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+		return -1;
+
+	return ch;
+}
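+
+/* Worked example (added for clarity, assuming the conventional start factors
+ * where WF_CHAN_FACTOR_2_4_G yields a 2407 MHz base and WF_CHAN_FACTOR_5_G a
+ * 5000 MHz base): 2412 MHz maps to channel 1, 2484 MHz to the channel 14
+ * special case, and 5180 MHz to channel 36.
+ */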
+
+
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+	int freq;
+
+	/* reject channels outside the 2.4 GHz (1-14) and overall (<= 200) ranges */
+	if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+	    (ch > 200))
+		freq = -1;
+	else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+		freq = 2484;	/* channel 14 is a special, non-5MHz-spaced case */
+	else
+		freq = ch * 5 + start_factor / 2;
+
+	return freq;
+}
diff --git a/drivers/net/wireless/bcm4329/dhd.h b/drivers/net/wireless/bcm4329/dhd.h
new file mode 100644
index 0000000..9aeca7e
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd.h
@@ -0,0 +1,457 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h,v 1.32.4.7.2.4.14.49.4.9 2011/01/14 22:40:45 Exp $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#if defined(LINUX)
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+/* The kernel threading is sdio-specific */
+#else /* LINUX */
+#define ENOMEM		1
+#define EFAULT      2
+#define EINVAL		3
+#define EIO			4
+#define ETIMEDOUT	5
+#define ERESTARTSYS 6
+#endif /* LINUX */
+
+#include <wlioctl.h>
+
+#ifdef DHD_DEBUG
+#ifndef DHD_DEBUG_TRAP
+#define DHD_DEBUG_TRAP
+#endif
+#endif
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+	DHD_BUS_DOWN,		/* Not ready for frame transfers */
+	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
+	DHD_BUS_DATA		/* Ready for frame transfers */
+};
+
+enum dhd_bus_wake_state {
+	WAKE_LOCK_OFF,
+	WAKE_LOCK_PRIV,
+	WAKE_LOCK_DPC,
+	WAKE_LOCK_IOCTL,
+	WAKE_LOCK_DOWNLOAD,
+	WAKE_LOCK_TMOUT,
+	WAKE_LOCK_WATCHDOG,
+	WAKE_LOCK_LINK_DOWN_TMOUT,
+	WAKE_LOCK_PNO_FIND_TMOUT,
+	WAKE_LOCK_SOFTAP_SET,
+	WAKE_LOCK_SOFTAP_STOP,
+	WAKE_LOCK_SOFTAP_START,
+	WAKE_LOCK_SOFTAP_THREAD,
+	WAKE_LOCK_MAX
+};
+enum dhd_prealloc_index {
+	DHD_PREALLOC_PROT = 0,
+	DHD_PREALLOC_RXBUF,
+	DHD_PREALLOC_DATABUF,
+	DHD_PREALLOC_OSL_BUF
+};
+#ifdef DHD_USE_STATIC_BUF
+extern void * dhd_os_prealloc(int section, unsigned long size);
+#endif
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+	osl_t *osh;		/* OSL handle */
+	struct dhd_bus *bus;	/* Bus module handle */
+	struct dhd_prot *prot;	/* Protocol module handle */
+	struct dhd_info  *info; /* Info module handle */
+
+	/* Internal dhd items */
+	bool up;		/* Driver up/down (to OS) */
+	bool txoff;		/* Transmit flow-controlled */
+	bool dongle_reset;  /* TRUE = DEVRESET put dongle into reset */
+	enum dhd_bus_state busstate;
+	uint hdrlen;		/* Total DHD header length (proto + bus) */
+	uint maxctl;		/* Max size rxctl request from proto to bus */
+	uint rxsz;		/* Rx buffer size bus module should use */
+	uint8 wme_dp;	/* wme discard priority */
+
+	/* Dongle media info */
+	bool iswl;		/* Dongle-resident driver is wl */
+	ulong drv_version;	/* Version of dongle-resident driver */
+	struct ether_addr mac;	/* MAC address obtained from dongle */
+	dngl_stats_t dstats;	/* Stats for dongle-based data */
+
+	/* Additional stats for the bus level */
+	ulong tx_packets;	/* Data packets sent to dongle */
+	ulong tx_multicast;	/* Multicast data packets sent to dongle */
+	ulong tx_errors;	/* Errors in sending data to dongle */
+	ulong tx_ctlpkts;	/* Control packets sent to dongle */
+	ulong tx_ctlerrs;	/* Errors sending control frames to dongle */
+	ulong rx_packets;	/* Packets sent up the network interface */
+	ulong rx_multicast;	/* Multicast packets sent up the network interface */
+	ulong rx_errors;	/* Errors processing rx data packets */
+	ulong rx_ctlpkts;	/* Control frames processed from dongle */
+	ulong rx_ctlerrs;	/* Errors in processing rx control frames */
+	ulong rx_dropped;	/* Packets dropped locally (no memory) */
+	ulong rx_flushed;  /* Packets flushed due to unscheduled sendup thread */
+	ulong wd_dpc_sched;   /* Number of times dhd dpc scheduled by watchdog timer */
+
+	ulong rx_readahead_cnt;	/* Number of packets where header read-ahead was used. */
+	ulong tx_realloc;	/* Number of tx packets we had to realloc for headroom */
+	ulong fc_packets;       /* Number of flow control pkts recvd */
+
+	/* Last error return */
+	int bcmerror;
+	uint tickcnt;
+
+	/* Last error from dongle */
+	int dongle_error;
+
+	/* Suspend disable flag and "in suspend" flag */
+	int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+	int in_suspend;			/* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;                 /* PNO status: "1" means PNO is enabled */
+#endif /* PNO_SUPPORT */
+	int dtim_skip;         /* DTIM skip; default 0 means wake at each DTIM */
+
+	/* Packet filter definitions */
+	char * pktfilter[100];
+	int pktfilter_count;
+
+	wl_country_t dhd_cspec;		/* Current Locale info */
+	char eventmask[WL_EVENTING_MASK_LEN];
+
+} dhd_pub_t;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+	#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define _DHD_PM_RESUME_WAIT(a, b) do { \
+			int retry = 0; \
+			smp_mb(); \
+			while (dhd_mmc_suspend && retry++ != b) { \
+				wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+			} \
+		} 	while (0)
+	#define DHD_PM_RESUME_WAIT(a) 		_DHD_PM_RESUME_WAIT(a, 200)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a) 	_DHD_PM_RESUME_WAIT(a, ~0)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)	do { if (dhd_mmc_suspend) return a; } while (0)
+	#define DHD_PM_RESUME_RETURN		do { if (dhd_mmc_suspend) return; } while (0)
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+		uint countdown = (us) + 9999; \
+		while ((exp) && (countdown >= 10000)) { \
+			wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+			countdown -= 10000; \
+		} \
+	} while (0)
+
+#else
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)
+	#define DHD_PM_RESUME_WAIT(a)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)
+	#define DHD_PM_RESUME_RETURN
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us)  do { \
+		uint countdown = (us) + 9; \
+		while ((exp) && (countdown >= 10)) { \
+			OSL_DELAY(10);  \
+			countdown -= 10;  \
+		} \
+	} while (0)
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
+
+inline static void NETIF_ADDR_LOCK(struct net_device *dev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+	netif_addr_lock_bh(dev);
+#endif
+}
+
+inline static void NETIF_ADDR_UNLOCK(struct net_device *dev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+	netif_addr_unlock_bh(dev);
+#endif
+}
+
+/* Wakelock Functions */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub);
+
+extern void dhd_os_start_lock(dhd_pub_t *pub);
+extern void dhd_os_start_unlock(dhd_pub_t *pub);
+extern unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+typedef struct dhd_if_event {
+	uint8 ifidx;
+	uint8 action;
+	uint8 flags;
+	uint8 bssidx;
+} dhd_if_event_t;
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* To allow osl_attach/detach calls from os-independent modules */
+osl_t *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(osl_t *osh);
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS.  Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* Query ioctl */
+extern int  dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void * dhd_os_open_image(char * filename);
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern int dhd_custom_get_mac_address(unsigned char *buf);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+#if defined(OOB_INTR_ONLY)
+extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
+#endif /* defined(OOB_INTR_ONLY) */
+extern void dhd_os_sdtxlock(dhd_pub_t * pub);
+extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+
+typedef struct {
+	uint32 limit;		/* Expiration time (usec) */
+	uint32 increment;	/* Current expiration increment (usec) */
+	uint32 elapsed;		/* Current elapsed time (usec) */
+	uint32 tick;		/* O/S tick time (usec) */
+} dhd_timeout_t;
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern uint8 *dhd_bssidx2bssid(dhd_pub_t *dhd, int idx);
+extern int wl_host_event(struct dhd_info *dhd, int *idx, void *pktdata,
+                         wl_event_msg_t *, void **data_ptr);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern void dhd_common_init(void);
+
+extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
+	char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx);
+extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int  dhd_bus_start(dhd_pub_t *dhdp);
+
+extern void print_buf(void *pbuf, int len, int bytes_per_line);
+
+
+typedef enum cust_gpio_modes {
+	WLAN_RESET_ON,
+	WLAN_RESET_OFF,
+	WLAN_POWER_ON,
+	WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+extern int net_os_send_hang_message(struct net_device *dev);
+
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+#endif /* defined(DHD_DEBUG) */
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Packet filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/*  Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#define DHD_IDLETIME_TICKS 1
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+
+/* Default KEEP_ALIVE period is 55 sec; periodic host keep-alives keep the AP from probing an idle station */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR	"null_pkt"
+
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN	2048
+extern char fw_path[MOD_PARAM_PATHLEN];
+extern char nv_path[MOD_PARAM_PATHLEN];
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS	16
+#define DHD_DEL_IF	-0xe
+#define DHD_BAD_IF	-0xf
+
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+/* dhd_common ARP offload wrappers */
+extern void dhd_arp_cleanup(dhd_pub_t *dhd);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, u32 ipaddr);
+
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_bus.h b/drivers/net/wireless/bcm4329/dhd_bus.h
new file mode 100644
index 0000000..97af41b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_bus.h
@@ -0,0 +1,93 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h,v 1.4.6.3.2.3.6.7 2010/08/13 01:35:24 Exp $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+	char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+#ifdef DHD_DEBUG
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* DHD_DEBUG */
+
+/* Deferred processing for the bus; returning TRUE requests a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                            void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_cdc.c b/drivers/net/wireless/bcm4329/dhd_cdc.c
new file mode 100644
index 0000000..61f6a6f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_cdc.c
@@ -0,0 +1,522 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c,v 1.22.4.2.4.7.2.41 2010/06/23 19:58:18 Exp $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload).
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN	(16+DHD_SDALIGN)	/* Must be at least SDPCM_RESERVE
+				 * defined in dhd_sdio.c (amount of header that might be added)
+				 * plus any space that might be needed for alignment padding.
+				 */
+#define ROUND_UP_MARGIN	2048 	/* Biggest SDIO block size possible for
+				 * round off at the end of buffer
+				 */
+
+typedef struct dhd_prot {
+	uint16 reqid;
+	uint8 pending;
+	uint32 lastcmd;
+	uint8 bus_header[BUS_HEADER_LEN];
+	cdc_ioctl_t msg;
+	unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+	int ret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_wake_lock(dhd);
+
+	/* NOTE : cdc->msg.len holds the desired length of the buffer to be
+	 *        returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+	 *	  is actually sent to the dongle
+	 */
+	if (len > CDC_MAX_MSG_SIZE)
+		len = CDC_MAX_MSG_SIZE;
+
+	/* Send request */
+	ret = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+	dhd_os_wake_unlock(dhd);
+	return ret;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+	int ret;
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	do {
+		ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, len+sizeof(cdc_ioctl_t));
+		if (ret < 0)
+			break;
+	} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+	return ret;
+}
+
+int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	void *info;
+	int ret = 0, retries = 0;
+	uint32 id, flags = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+	/* Answer "bcmerror" and "bcmerrorstr" queries from the locally cached dongle error */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if ((id < prot->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check info buffer */
+	info = (void*)&msg[1];
+
+	/* Copy info buffer */
+	if (buf)
+	{
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, info, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0;
+	uint32 flags, id;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT) | CDCF_IOC_SET;
+	CDC_SET_IF_IDX(msg, ifidx);
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0)
+		goto done;
+
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+extern int dhd_bus_interface(struct dhd_bus *bus, uint arg, void* arg2);
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return ret;
+	}
+	dhd_os_proto_block(dhd);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_TRACE(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	if (ioc->set)
+		ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len);
+	else {
+		ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len);
+		if (ret > 0)
+			ioc->used = ret - sizeof(cdc_ioctl_t);
+	}
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		cdc_ioctl_t *msg = &prot->msg;
+		ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+	prot->pending = FALSE;
+
+done:
+	dhd_os_proto_unblock(dhd);
+
+	return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                  void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+}
+
+
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif /* BDC */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Push BDC header used to convey priority for buses that don't carry it natively */
+
+
+	PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN);
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(pktbuf))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->rssi = 0;
+	BDC_SET_IF_IDX(h, ifidx);
+#endif /* BDC */
+}
+
+
+bool
+dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, uint8 *fcbits)
+{
+#ifdef BDC
+	struct bdc_header *h;
+
+	if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n",
+			__FUNCTION__, PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	*fcbits = h->priority >> BDC_PRIORITY_FC_SHIFT;
+	if ((h->flags2 & BDC_FLAG2_FC_FLAG) == BDC_FLAG2_FC_FLAG)
+		return TRUE;
+#endif
+	return FALSE;
+}
+
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Pop BDC header used to convey priority for buses that don't carry it natively */
+
+	if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+		           __FUNCTION__, *ifidx));
+		return BCME_ERROR;
+	}
+
+	if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+		DHD_ERROR(("%s: non-BDC packet received, flags 0x%x\n",
+		           dhd_ifname(dhd, *ifidx), h->flags));
+		return BCME_ERROR;
+	}
+
+	if (h->flags & BDC_FLAG_SUM_GOOD) {
+		DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+		          dhd_ifname(dhd, *ifidx), h->flags));
+		PKTSETSUMGOOD(pktbuf, TRUE);
+	}
+
+	PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+
+	PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+	return 0;
+}
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *cdc;
+
+#ifndef DHD_USE_STATIC_BUF
+	if (!(cdc = (dhd_prot_t *)MALLOC(dhd->osh, sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+#else
+	if (!(cdc = (dhd_prot_t *)dhd_os_prealloc(DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+#endif /* DHD_USE_STATIC_BUF */
+	memset(cdc, 0, sizeof(dhd_prot_t));
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+		DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+		goto fail;
+	}
+
+	dhd->prot = cdc;
+#ifdef BDC
+	dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+	dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+	return 0;
+
+fail:
+#ifndef DHD_USE_STATIC_BUF
+	if (cdc != NULL)
+		MFREE(dhd->osh, cdc, sizeof(dhd_prot_t));
+#endif
+	return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore?  Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifndef DHD_USE_STATIC_BUF
+	MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif
+	dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+	/* No stats from dongle added yet, copy bus stats */
+	dhd->dstats.tx_packets = dhd->tx_packets;
+	dhd->dstats.tx_errors = dhd->tx_errors;
+	dhd->dstats.rx_packets = dhd->rx_packets;
+	dhd->dstats.rx_errors = dhd->rx_errors;
+	dhd->dstats.rx_dropped = dhd->rx_dropped;
+	dhd->dstats.multicast = dhd->rx_multicast;
+	return;
+}
+
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	char buf[128];
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_proto_block(dhd);
+
+	/* Get the device MAC address */
+	strcpy(buf, "cur_etheraddr");
+	ret = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf));
+	if (ret < 0) {
+		dhd_os_proto_unblock(dhd);
+		return ret;
+	}
+	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+	dhd_os_proto_unblock(dhd);
+
+#ifdef EMBEDDED_PLATFORM
+	ret = dhd_preinit_ioctls(dhd);
+#endif /* EMBEDDED_PLATFORM */
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+
+	return ret;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+	/* Nothing to do for CDC */
+}
diff --git a/drivers/net/wireless/bcm4329/dhd_common.c b/drivers/net/wireless/bcm4329/dhd_common.c
new file mode 100644
index 0000000..e50da14
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_common.c
@@ -0,0 +1,2426 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c,v 1.5.6.8.2.6.6.69.4.25 2011-02-11 21:16:02 Exp $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#include <wlioctl.h>
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#endif
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+int wifi_get_mac_addr(unsigned char *buf);
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+int dhd_msg_level;
+
+#include <wl_iw.h>
+
+char fw_path[MOD_PARAM_PATHLEN];
+char nv_path[MOD_PARAM_PATHLEN];
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_wl_ioctl(dhd_pub_t *dhd, uint cmd, char *buf, uint buflen);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+
+#if defined(SOFTAP)
+extern bool ap_fw_loaded;
+#endif
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on);
+#endif /* KEEP_ALIVE */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#ifdef DHD_DEBUG
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
+	__DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#endif
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+/* IOVar table */
+enum {
+	IOV_VERSION = 1,
+	IOV_MSGLEVEL,
+	IOV_BCMERRORSTR,
+	IOV_BCMERROR,
+	IOV_WDTICK,
+	IOV_DUMP,
+#ifdef DHD_DEBUG
+	IOV_CONS,
+	IOV_DCONSOLE_POLL,
+#endif
+	IOV_CLEARCOUNTS,
+	IOV_LOGDUMP,
+	IOV_LOGCAL,
+	IOV_LOGSTAMP,
+	IOV_GPIOOB,
+	IOV_IOCTLTIMEOUT,
+	IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+	{"version", 	IOV_VERSION,	0,	IOVT_BUFFER,	sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+	{"msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG */
+	{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER,	BCME_STRLEN },
+	{"bcmerror",	IOV_BCMERROR,	0,	IOVT_INT8,	0 },
+	{"wdtick",	IOV_WDTICK, 0,	IOVT_UINT32,	0 },
+	{"dump",	IOV_DUMP,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+	{"dconpoll",	IOV_DCONSOLE_POLL, 0,	IOVT_UINT32,	0 },
+	{"cons",	IOV_CONS,	0,	IOVT_BUFFER,	0 },
+#endif
+	{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID,	0 },
+	{"gpioob",	IOV_GPIOOB,	0,	IOVT_UINT32,	0 },
+	{"ioctl_timeout",	IOV_IOCTLTIMEOUT,	0,	IOVT_UINT32,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+void
+dhd_common_init(void)
+{
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behaviour since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_msg_level = DHD_ERROR_VAL;
+#ifdef CONFIG_BCM4329_FW_PATH
+	strncpy(fw_path, CONFIG_BCM4329_FW_PATH, MOD_PARAM_PATHLEN-1);
+#else
+	fw_path[0] = '\0';
+#endif
+#ifdef CONFIG_BCM4329_NVRAM_PATH
+	strncpy(nv_path, CONFIG_BCM4329_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+#else
+	nv_path[0] = '\0';
+#endif
+}
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	/* Base DHD info */
+	bcm_bprintf(strbuf, "%s\n", dhd_version);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+	            dhdp->up, dhdp->txoff, dhdp->busstate);
+	bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
+	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+	            dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt);
+
+	bcm_bprintf(strbuf, "dongle stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
+	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+	bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
+	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+	bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
+
+	bcm_bprintf(strbuf, "bus stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
+	            dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+	bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
+	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+	bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n",
+	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+	bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld rx_flushed %ld\n",
+	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped, dhdp->rx_flushed);
+	bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld fc_packets %ld\n",
+	            dhdp->rx_readahead_cnt, dhdp->tx_realloc, dhdp->fc_packets);
+	bcm_bprintf(strbuf, "wd_dpc_sched %ld\n", dhdp->wd_dpc_sched);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any prot info */
+	dhd_prot_dump(dhdp, strbuf);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any bus info */
+	dhd_bus_dump(dhdp, strbuf);
+
+	return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+            void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
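+	/* actionid encodes both the iovar id and whether this is a get or a
+	 * set (IOV_GVAL()/IOV_SVAL()), as constructed in dhd_iovar_op().
+	 */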
+	switch (actionid) {
+	case IOV_GVAL(IOV_VERSION):
+		/* Need to have checked buffer length */
+		strncpy((char*)arg, dhd_version, len);
+		break;
+
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)dhd_msg_level;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		dhd_msg_level = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BCMERRORSTR):
+		strncpy((char *)arg, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+		((char *)arg)[BCME_STRLEN - 1] = 0x00;
+		break;
+
+	case IOV_GVAL(IOV_BCMERROR):
+		int_val = (int32)dhd_pub->bcmerror;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_WDTICK):
+		int_val = (int32)dhd_watchdog_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WDTICK):
+		if (!dhd_pub->up) {
+			bcmerror = BCME_NOTUP;
+			break;
+		}
+		dhd_os_wd_timer(dhd_pub, (uint)int_val);
+		break;
+
+	case IOV_GVAL(IOV_DUMP):
+		bcmerror = dhd_dump(dhd_pub, arg, len);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_DCONSOLE_POLL):
+		int_val = (int32)dhd_console_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DCONSOLE_POLL):
+		dhd_console_ms = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_CONS):
+		if (len > 0)
+			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+		break;
+#endif
+
+	case IOV_SVAL(IOV_CLEARCOUNTS):
+		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+		dhd_pub->rx_dropped = 0;
+		dhd_pub->rx_readahead_cnt = 0;
+		dhd_pub->tx_realloc = 0;
+		dhd_pub->wd_dpc_sched = 0;
+		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+		dhd_bus_clearcounts(dhd_pub);
+		break;
+
+
+	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+		break;
+	}
+
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+	      dhd_conn_event == WLC_E_PRUNE)) {
+		dhd_conn_event = event;
+		dhd_conn_status = status;
+		dhd_conn_reason = reason;
+	}
+}
+
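+/*
+ * Enqueue pkt at precedence prec. If the queue is full, a victim is picked
+ * from the lowest eligible precedence and freed, or the new packet is refused,
+ * according to the per-precedence discard policy in dhdp->wme_dp.
+ * Returns TRUE if the packet was enqueued, FALSE if it was refused.
+ */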
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	void *p;
+	int eprec = -1;		/* precedence to evict from */
+	bool discard_oldest;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(q, prec))
+		eprec = prec;
+	else if (pktq_full(q)) {
+		p = pktq_peek_tail(q, &eprec);
+		ASSERT(p);
+		if (eprec > prec)
+			return FALSE;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(q, eprec));
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+		if (eprec == prec && !discard_oldest)
+			return FALSE;		/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+		if (p == NULL) {
+			DHD_ERROR(("%s: pktq_penq() failed, oldest %d.",
+				__FUNCTION__, discard_oldest));
+			ASSERT(p);
+		}
+
+		PKTFREE(dhdp->osh, p, TRUE);
+	}
+
+	/* Enqueue */
+	p = pktq_penq(q, prec, pkt);
+	if (p == NULL) {
+		DHD_ERROR(("%s: pktq_penq() failed.", __FUNCTION__));
+		ASSERT(p);
+	}
+
+	return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+             void *params, int plen, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+	int val_size;
+	const bcm_iovar_t *vi = NULL;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
+{
+	int bcmerror = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) return BCME_BADARG;
+
+	switch (ioc->cmd) {
+	case DHD_GET_MAGIC:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_MAGIC;
+		break;
+
+	case DHD_GET_VERSION:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_VERSION;
+		break;
+
+	case DHD_GET_VAR:
+	case DHD_SET_VAR: {
+		char *arg;
+		uint arglen;
+
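+		/* buf holds the NUL-terminated iovar name immediately followed
+		 * by any argument bytes.
+		 */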
+		/* scan past the name to any arguments */
+		for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--);
+
+		if (*arg) {
+			bcmerror = BCME_BUFTOOSHORT;
+			break;
+		}
+
+		/* account for the NUL terminator */
+		arg++, arglen--;
+
+		/* call with the appropriate arguments */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+			buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* not in generic table, try protocol module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+			                             arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+			                             NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* if still not found, try bus module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+			                            arg, arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+			                            NULL, 0, arg, arglen, IOV_SET);
+
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+
+#ifdef SHOW_EVENTS
+static void
+wl_show_host_event(wl_event_msg_t *event, void *event_data)
+{
+	uint i, status, reason;
+	bool group = FALSE, flush_txq = FALSE, link = FALSE;
+	char *auth_str, *event_name;
+	uchar *buf;
+	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+	static struct {uint event; char *event_name;} event_names[] = {
+		{WLC_E_SET_SSID, "SET_SSID"},
+		{WLC_E_JOIN, "JOIN"},
+		{WLC_E_START, "START"},
+		{WLC_E_AUTH, "AUTH"},
+		{WLC_E_AUTH_IND, "AUTH_IND"},
+		{WLC_E_DEAUTH, "DEAUTH"},
+		{WLC_E_DEAUTH_IND, "DEAUTH_IND"},
+		{WLC_E_ASSOC, "ASSOC"},
+		{WLC_E_ASSOC_IND, "ASSOC_IND"},
+		{WLC_E_REASSOC, "REASSOC"},
+		{WLC_E_REASSOC_IND, "REASSOC_IND"},
+		{WLC_E_DISASSOC, "DISASSOC"},
+		{WLC_E_DISASSOC_IND, "DISASSOC_IND"},
+		{WLC_E_QUIET_START, "START_QUIET"},
+		{WLC_E_QUIET_END, "END_QUIET"},
+		{WLC_E_BEACON_RX, "BEACON_RX"},
+		{WLC_E_LINK, "LINK"},
+		{WLC_E_MIC_ERROR, "MIC_ERROR"},
+		{WLC_E_NDIS_LINK, "NDIS_LINK"},
+		{WLC_E_ROAM, "ROAM"},
+		{WLC_E_TXFAIL, "TXFAIL"},
+		{WLC_E_PMKID_CACHE, "PMKID_CACHE"},
+		{WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF"},
+		{WLC_E_PRUNE, "PRUNE"},
+		{WLC_E_AUTOAUTH, "AUTOAUTH"},
+		{WLC_E_EAPOL_MSG, "EAPOL_MSG"},
+		{WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE"},
+		{WLC_E_ADDTS_IND, "ADDTS_IND"},
+		{WLC_E_DELTS_IND, "DELTS_IND"},
+		{WLC_E_BCNSENT_IND, "BCNSENT_IND"},
+		{WLC_E_BCNRX_MSG, "BCNRX_MSG"},
+		{WLC_E_BCNLOST_MSG, "BCNLOST_MSG"},
+		{WLC_E_ROAM_PREP, "ROAM_PREP"},
+		{WLC_E_PFN_NET_FOUND, "PNO_NET_FOUND"},
+		{WLC_E_PFN_NET_LOST, "PNO_NET_LOST"},
+		{WLC_E_RESET_COMPLETE, "RESET_COMPLETE"},
+		{WLC_E_JOIN_START, "JOIN_START"},
+		{WLC_E_ROAM_START, "ROAM_START"},
+		{WLC_E_ASSOC_START, "ASSOC_START"},
+		{WLC_E_IBSS_ASSOC, "IBSS_ASSOC"},
+		{WLC_E_RADIO, "RADIO"},
+		{WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG"},
+		{WLC_E_PROBREQ_MSG, "PROBREQ_MSG"},
+		{WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"},
+		{WLC_E_PSK_SUP, "PSK_SUP"},
+		{WLC_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"},
+		{WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"},
+		{WLC_E_ICV_ERROR, "ICV_ERROR"},
+		{WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"},
+		{WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"},
+		{WLC_E_TRACE, "TRACE"},
+		{WLC_E_ACTION_FRAME, "ACTION FRAME"},
+		{WLC_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"},
+		{WLC_E_IF, "IF"},
+		{WLC_E_RSSI, "RSSI"},
+		{WLC_E_PFN_SCAN_COMPLETE, "PNO_SCAN_COMPLETE"}
+	};
+	uint event_type, flags, auth_type, datalen;
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	reason = ntoh32(event->reason);
+	auth_type = ntoh32(event->auth_type);
+	datalen = ntoh32(event->datalen);
+	/* debug dump of event messages */
+	sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x",
+	        (uchar)event->addr.octet[0]&0xff,
+	        (uchar)event->addr.octet[1]&0xff,
+	        (uchar)event->addr.octet[2]&0xff,
+	        (uchar)event->addr.octet[3]&0xff,
+	        (uchar)event->addr.octet[4]&0xff,
+	        (uchar)event->addr.octet[5]&0xff);
+
+	event_name = "UNKNOWN";
+	for (i = 0; i < ARRAYSIZE(event_names); i++) {
+		if (event_names[i].event == event_type)
+			event_name = event_names[i].event_name;
+	}
+
+	DHD_EVENT(("EVENT: %s, event ID = %d\n", event_name, event_type));
+
+	if (flags & WLC_EVENT_MSG_LINK)
+		link = TRUE;
+	if (flags & WLC_EVENT_MSG_GROUP)
+		group = TRUE;
+	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+		flush_txq = TRUE;
+
+	switch (event_type) {
+	case WLC_E_START:
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC:
+	case WLC_E_REASSOC:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+			       event_name, eabuf, (int)reason));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+			       event_name, eabuf, (int)status));
+		}
+		break;
+
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else {
+			sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+			       event_name, eabuf, auth_str, (int)reason));
+		}
+
+		break;
+
+	case WLC_E_JOIN:
+	case WLC_E_ROAM:
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+				event_name, (int)status));
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+		       event_name, eabuf, group, flush_txq));
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+		       event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+		           event_name, (int)status, (int)reason));
+		break;
+
+	case WLC_E_TRACE:
+		{
+			static uint32 seqnum_prev = 0;
+			msgtrace_hdr_t hdr;
+			uint32 nblost;
+			char *s, *p;
+
+			buf = (uchar *) event_data;
+			memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+			if (hdr.version != MSGTRACE_VERSION) {
+				printf("\nMACEVENT: %s [unsupported version --> "
+				       "dhd version:%d dongle version:%d]\n",
+				       event_name, MSGTRACE_VERSION, hdr.version);
+				/* Reset datalen to avoid display below */
+				datalen = 0;
+				break;
+			}
+
+			/* There are 2 bytes available at the end of data */
+			buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+			if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+				printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
+				       "discarded_bytes %d discarded_printf %d]\n",
+				       ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+			}
+
+			nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+			if (nblost > 0) {
+				printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
+				        ntoh32(hdr.seqnum), nblost);
+			}
+			seqnum_prev = ntoh32(hdr.seqnum);
+
+			/* Display the trace buffer. Advance from \n to \n to avoid one
+			 * big printf (an issue with Linux printk).
+			 */
+			p = (char *)&buf[MSGTRACE_HDRLEN];
+			while ((s = strstr(p, "\n")) != NULL) {
+				*s = '\0';
+				printf("%s\n", p);
+				p = s + 1;
+			}
+			printf("%s\n", p);
+
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+		}
+		break;
+
+
+	case WLC_E_RSSI:
+		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		break;
+
+	default:
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+		       event_name, event_type, eabuf, (int)status, (int)reason,
+		       (int)auth_type));
+		break;
+	}
+
+	/* show any appended data */
+	if (datalen) {
+		buf = (uchar *) event_data;
+		DHD_EVENT((" data (%d) : ", datalen));
+		for (i = 0; i < datalen; i++)
+			DHD_EVENT((" 0x%02x ", *buf++));
+		DHD_EVENT(("\n"));
+	}
+}
+#endif /* SHOW_EVENTS */
+
+int
+wl_host_event(struct dhd_info *dhd, int *ifidx, void *pktdata,
+              wl_event_msg_t *event, void **data_ptr)
+{
+	/* check whether packet is a BRCM event pkt */
+	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+	char *event_data;
+	uint32 type, status;
+	uint16 flags;
+	int evlen;
+
+	if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+		DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+	if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+		DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	*data_ptr = &pvt_data[1];
+	event_data = *data_ptr;
+
+	/* memcpy since BRCM event pkt may be unaligned. */
+	memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+	type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+	evlen = ntoh32_ua((void *)&event->datalen) + sizeof(bcm_event_t);
+
+	switch (type) {
+		case WLC_E_IF:
+			{
+				dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
+				DHD_TRACE(("%s: if event\n", __FUNCTION__));
+
+				if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS)
+				{
+					if (ifevent->action == WLC_E_IF_ADD)
+						dhd_add_if(dhd, ifevent->ifidx,
+							NULL, event->ifname,
+							pvt_data->eth.ether_dhost,
+							ifevent->flags, ifevent->bssidx);
+					else
+						dhd_del_if(dhd, ifevent->ifidx);
+				} else {
+					DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+						__FUNCTION__, ifevent->ifidx, event->ifname));
+				}
+			}
+			/* send up the if event: btamp user needs it */
+			*ifidx = dhd_ifname2idx(dhd, event->ifname);
+			/* push up to external supp/auth */
+			dhd_event(dhd, (char *)pvt_data, evlen, *ifidx);
+			break;
+
+
+#ifdef P2P
+		case WLC_E_NDIS_LINK:
+			break;
+#endif
+		/* fall through */
+		/* These are what external supplicant/authenticator wants */
+		case WLC_E_LINK:
+		case WLC_E_ASSOC_IND:
+		case WLC_E_REASSOC_IND:
+		case WLC_E_DISASSOC_IND:
+		case WLC_E_MIC_ERROR:
+		default:
+		/* Fall through: this should get _everything_  */
+
+			*ifidx = dhd_ifname2idx(dhd, event->ifname);
+			/* push up to external supp/auth */
+			dhd_event(dhd, (char *)pvt_data, evlen, *ifidx);
+			DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+			           __FUNCTION__, type, flags, status));
+
+			/* put it back to WLC_E_NDIS_LINK */
+			if (type == WLC_E_NDIS_LINK) {
+				uint32 temp;
+
+				temp = ntoh32_ua((void *)&event->event_type);
+				DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
+
+				temp = ntoh32(WLC_E_NDIS_LINK);
+				memcpy((void *)(&pvt_data->event.event_type), &temp,
+					sizeof(pvt_data->event.event_type));
+			}
+			break;
+	}
+
+#ifdef SHOW_EVENTS
+	wl_show_host_event(event, event_data);
+#endif /* SHOW_EVENTS */
+
+	return (BCME_OK);
+}
+
+
+void
+wl_event_to_host_order(wl_event_msg_t *evt)
+{
+	/* Event struct members passed from dongle to host are stored in network
+	 * byte order. Convert all members to host-order.
+	 */
+	evt->event_type = ntoh32(evt->event_type);
+	evt->flags = ntoh16(evt->flags);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+	evt->version = ntoh16(evt->version);
+}
+
+void print_buf(void *pbuf, int len, int bytes_per_line)
+{
+	int i, j = 0;
+	unsigned char *buf = pbuf;
+
+	if (bytes_per_line == 0) {
+		bytes_per_line = len;
+	}
+
+	for (i = 0; i < len; i++) {
+		printf("%2.2x", *buf++);
+		j++;
+		if (j == bytes_per_line) {
+			printf("\n");
+			j = 0;
+		} else {
+			printf(":");
+		}
+	}
+	printf("\n");
+}
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+#ifdef PKT_FILTER_SUPPORT
+/* Convert user's input in hex pattern to byte-size mask */
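+/* For example "0x00ff" yields the two bytes {0x00, 0xff} and a return value
+ * of 2; the string must start with 0x/0X and have an even number of digits.
+ */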
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 &&
+	    strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2; /* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		strncpy(num, src, 2);
+		num[2] = '\0';
+		dst[i] = (uint8)strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+	char				*argv[8];
+	int					i = 0;
+	const char 			*str;
+	int					buf_len;
+	int					str_len;
+	char				*arg_save = 0, *arg_org = 0;
+	int					rc;
+	char				buf[128];
+	wl_pkt_filter_enable_t	enable_parm;
+	wl_pkt_filter_enable_t	* pkt_filterp;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	arg_org = arg_save;
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (NULL == argv[i]) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_enable";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[str_len] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+	/* Parse enable/disable value. */
+	enable_parm.enable = htod32(enable);
+
+	buf_len += sizeof(enable_parm);
+	memcpy((char *)pkt_filterp,
+	       &enable_parm,
+	       sizeof(enable_parm));
+
+	/* Enable/disable the specified filter. */
+	rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+	/* Control the master mode */
+	bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+	rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
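+/*
+ * Add a packet filter via the "pkt_filter_add" iovar. The arg string is
+ * parsed below as space-separated fields:
+ *   <id> <polarity> <type> <offset> <bitmask> <pattern>
+ * where bitmask and pattern are 0x-prefixed hex strings of equal length.
+ */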
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+	const char 			*str;
+	wl_pkt_filter_t		pkt_filter;
+	wl_pkt_filter_t		*pkt_filterp;
+	int					buf_len;
+	int					str_len;
+	int 				rc;
+	uint32				mask_size;
+	uint32				pattern_size;
+	char				*argv[8], * buf = 0;
+	int					i = 0;
+	char				*arg_save = 0, *arg_org = 0;
+#define BUF_SIZE		2048
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	arg_org = arg_save;
+
+	if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	if (strlen(arg) > BUF_SIZE) {
+		DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+		goto fail;
+	}
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+	while (argv[i++])
+		argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (NULL == argv[i]) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[ str_len ] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+	if (NULL == argv[++i]) {
+		DHD_ERROR(("Polarity not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+	if (NULL == argv[++i]) {
+		DHD_ERROR(("Filter type not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+	if (NULL == argv[++i]) {
+		DHD_ERROR(("Offset not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter offset. */
+	pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+	if (NULL == argv[++i]) {
+		DHD_ERROR(("Bitmask not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter mask. */
+	mask_size =
+		htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+	if (NULL == argv[++i]) {
+		DHD_ERROR(("Pattern not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter pattern. */
+	pattern_size =
+		htod32(wl_pattern_atoh(argv[i],
+	         (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+	if (mask_size != pattern_size) {
+		DHD_ERROR(("Mask and pattern not the same size\n"));
+		goto fail;
+	}
+
+	pkt_filter.u.pattern.size_bytes = mask_size;
+	buf_len += WL_PKT_FILTER_FIXED_LEN;
+	buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+	/* Filter attributes are set in a local variable (pkt_filter), and
+	** then memcpy'ed into the buffer (pkt_filterp) since there is no
+	** guarantee that the buffer is properly aligned.
+	*/
+	memcpy((char *)pkt_filterp,
+	       &pkt_filter,
+	       WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+	rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+	rc = rc >= 0 ? 0 : rc;
+
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+	if (buf)
+		MFREE(dhd->osh, buf, BUF_SIZE);
+}
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+	char iovbuf[32];
+	int retcode;
+
+	bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+	retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+		__FUNCTION__, arp_mode, retcode));
+	else
+		DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+		__FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+	char iovbuf[32];
+	int retcode;
+
+	bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+	retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+		__FUNCTION__, arp_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+		__FUNCTION__, arp_enable));
+}
+#endif
+
+
+void dhd_arp_cleanup(dhd_pub_t *dhd)
+{
+#ifdef ARP_OFFLOAD_SUPPORT
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[128];
+
+	if (dhd == NULL) return;
+
+	dhd_os_proto_block(dhd);
+
+	iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if ((ret  = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+	iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if ((ret  = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+	dhd_os_proto_unblock(dhd);
+
+#endif /* ARP_OFFLOAD_SUPPORT */
+}
+
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, u32 ipaddr)
+{
+#ifdef ARP_OFFLOAD_SUPPORT
+	int iov_len = 0;
+	char iovbuf[32];
+	int retcode;
+
+	dhd_os_proto_block(dhd);
+
+	iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf));
+	retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len);
+
+	dhd_os_proto_unblock(dhd);
+
+	if (retcode)
+		DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ARP ipaddr entry added\n",
+		__FUNCTION__));
+#endif /* ARP_OFFLOAD_SUPPORT */
+}
+
+
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen)
+{
+#ifdef ARP_OFFLOAD_SUPPORT
+	int retcode;
+	int iov_len = 0;
+
+	if (!buf)
+		return -1;
+
+	dhd_os_proto_block(dhd);
+
+	iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+	retcode = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, buflen);
+
+	dhd_os_proto_unblock(dhd);
+
+	if (retcode) {
+		DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+		__FUNCTION__, retcode));
+
+		return -1;
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+	return 0;
+}
+
+
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+	uint up = 0;
+	char buf[128], *ptr;
+	uint power_mode = PM_FAST;
+	uint32 dongle_align = DHD_SDALIGN;
+	uint32 glom = 0;
+	uint bcn_timeout = 4;
+	int scan_assoc_time = 40;
+	int scan_unassoc_time = 40;
+	uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#if defined(SOFTAP)
+	uint dtim = 1;
+#endif
+	int ret = 0;
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	dhd_os_proto_block(dhd);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	/*
+	** Read the MAC address from an external, customer-specific location.
+	** NOTE that a default MAC address has to be present in OTP or the NVRAM
+	** file to bring up the firmware, but a unique per-board MAC address may
+	** be provided by customer code.
+	*/
+	ret = dhd_custom_get_mac_address(ea_addr.octet);
+	if (!ret) {
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+	if (strstr(fw_path, "apsta") != NULL) {
+		uint rand_mac;
+
+		srandom32((uint)jiffies);
+		rand_mac = random32();
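+		/* Compose a locally administered MAC address: fixed 02:1A:11
+		 * prefix plus three bytes derived from the random value.
+		 */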
+		iovbuf[0] = 0x02;              /* locally administered bit */
+		iovbuf[1] = 0x1A;
+		iovbuf[2] = 0x11;
+		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+		iovbuf[4] = (unsigned char)(rand_mac >> 8);
+		iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+		printk("Broadcom Dongle Host Driver mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
+			iovbuf[0], iovbuf[1], iovbuf[2], iovbuf[3], iovbuf[4], iovbuf[5]);
+
+		bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+	}
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+	/* Set Country code */
+	if (dhd->dhd_cspec.ccode[0] != 0) {
+		bcm_mkiovar("country", (char *)&dhd->dhd_cspec, \
+			sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+		if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) {
+			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+		}
+	}
+
+	/* Set Listen Interval */
+	bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0)
+		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	bcm_mkiovar("ver", 0, 0, buf, sizeof(buf));
+	dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf));
+	bcmstrtok(&ptr, "\n", 0);
+	/* Print fw version info */
+	DHD_ERROR(("Firmware version = %s\n", buf));
+
+	/* Set PowerSave mode */
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode));
+
+	/* Match Host and Dongle rx alignment */
+	bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+	/* disable glom option by default */
+	bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+	/* Set up a timeout to report link down if beacons are lost and roaming is off */
+	bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+	/* Enable/Disable built-in roaming to allow the external supplicant to take care of roaming */
+	bcm_mkiovar("roam_off", (char *)&dhd_roam, 4, iovbuf, sizeof(iovbuf));
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == TRUE) {
+		dhdcdc_set_ioctl(dhd, 0, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim));
+	}
+#endif
+
+	if (dhd_roam == 0)
+	{
+		/* set internal roaming parameters */
+		int roam_scan_period = 30; /* in sec */
+		int roam_fullscan_period = 120; /* in sec */
+		int roam_trigger = -85;
+		int roam_delta = 15;
+		int band;
+		int band_temp_set = WLC_BAND_2G;
+
+		if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, \
+			(char *)&roam_scan_period, sizeof(roam_scan_period)) < 0)
+			DHD_ERROR(("%s: roam scan setup failed\n", __FUNCTION__));
+
+		bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, \
+					 4, iovbuf, sizeof(iovbuf));
+		if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, \
+			iovbuf, sizeof(iovbuf)) < 0)
+			DHD_ERROR(("%s: roam fullscan setup failed\n", __FUNCTION__));
+
+		if (dhdcdc_query_ioctl(dhd, 0, WLC_GET_BAND, \
+				(char *)&band, sizeof(band)) < 0)
+			DHD_ERROR(("%s: roam delta setting failed\n", __FUNCTION__));
+		else {
+			if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_ALL))
+			{
+				/* temporarily set band to insert new roam values */
+				if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_BAND, \
+					(char *)&band_temp_set, sizeof(band_temp_set)) < 0)
+					DHD_ERROR(("%s: local band seting failed\n", __FUNCTION__));
+			}
+			if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_DELTA, \
+				(char *)&roam_delta, sizeof(roam_delta)) < 0)
+				DHD_ERROR(("%s: roam delta setting failed\n", __FUNCTION__));
+
+			if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_ROAM_TRIGGER, \
+				(char *)&roam_trigger, sizeof(roam_trigger)) < 0)
+				DHD_ERROR(("%s: roam trigger setting failed\n", __FUNCTION__));
+
+			/* Restore original band settings */
+			if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_BAND, \
+				(char *)&band, sizeof(band)) < 0)
+				DHD_ERROR(("%s: Original band restore failed\n", __FUNCTION__));
+		}
+	}
+
+	/* Force STA UP */
+	if (dhd_radio_up)
+		dhdcdc_set_ioctl(dhd, 0, WLC_UP, (char *)&up, sizeof(up));
+
+	/* Setup event_msgs */
+	bcm_mkiovar("event_msgs", dhd->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+		sizeof(scan_assoc_time));
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+		sizeof(scan_unassoc_time));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* Set and enable ARP offload feature */
+	if (dhd_arp_enable)
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+	dhd_arp_offload_enable(dhd, dhd_arp_enable);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	{
+		int i;
+		/* Set up pkt filter */
+		if (dhd_pkt_filter_enable) {
+			for (i = 0; i < dhd->pktfilter_count; i++) {
+				dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+				dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+					dhd_pkt_filter_init, dhd_master_mode);
+			}
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+
+#if defined(KEEP_ALIVE)
+	{
+	/* Set Keep Alive : be sure to use FW with -keepalive */
+	int res;
+
+	if (ap_fw_loaded == FALSE) {
+		if ((res = dhd_keep_alive_onoff(dhd, 1)) < 0)
+			DHD_ERROR(("%s set keeplive failed %d\n", \
+		__FUNCTION__, res));
+		}
+	}
+#endif
+
+	dhd_os_proto_unblock(dhd);
+
+	return 0;
+}
+
+#ifdef SIMPLE_ISCAN
+
+uint iscan_thread_id;
+iscan_buf_t * iscan_chain = 0;
+
+iscan_buf_t *
+dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
+{
+	iscan_buf_t *iscanbuf_alloc = 0;
+	iscan_buf_t *iscanbuf_head;
+
+	dhd_iscan_lock();
+
+	iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
+	if (iscanbuf_alloc == NULL)
+		goto fail;
+
+	iscanbuf_alloc->next = NULL;
+	iscanbuf_head = *iscanbuf;
+
+	DHD_ISCAN(("%s: addr of allocated node = 0x%X"
+		   "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
+		   __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
+
+	if (iscanbuf_head == NULL) {
+		*iscanbuf = iscanbuf_alloc;
+		DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
+		goto fail;
+	}
+
+	while (iscanbuf_head->next)
+		iscanbuf_head = iscanbuf_head->next;
+
+	iscanbuf_head->next = iscanbuf_alloc;
+
+fail:
+	dhd_iscan_unlock();
+	return iscanbuf_alloc;
+}
+
+void
+dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
+{
+	iscan_buf_t *iscanbuf_free = 0;
+	iscan_buf_t *iscanbuf_prv = 0;
+	iscan_buf_t *iscanbuf_cur = iscan_chain;
+	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+
+	dhd_iscan_lock();
+	/* If iscan_delete is null then delete the entire
+	 * chain or else delete specific one provided
+	 */
+	if (!iscan_delete) {
+		while (iscanbuf_cur) {
+			iscanbuf_free = iscanbuf_cur;
+			iscanbuf_cur = iscanbuf_cur->next;
+			iscanbuf_free->next = 0;
+			MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
+		}
+		iscan_chain = 0;
+	} else {
+		while (iscanbuf_cur) {
+			if (iscanbuf_cur == iscan_delete)
+				break;
+			iscanbuf_prv = iscanbuf_cur;
+			iscanbuf_cur = iscanbuf_cur->next;
+		}
+		if (iscanbuf_prv)
+			iscanbuf_prv->next = iscan_delete->next;
+
+		iscan_delete->next = 0;
+		MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
+
+		if (!iscanbuf_prv)
+			iscan_chain = 0;
+	}
+	dhd_iscan_unlock();
+}
+
+iscan_buf_t *
+dhd_iscan_result_buf(void)
+{
+	return iscan_chain;
+}
+
+
+
+/*
+* print scan cache
+* print partial iscan_skip list differently
+*/
+int
+dhd_iscan_print_cache(iscan_buf_t *iscan_skip)
+{
+	int i = 0, l = 0;
+	iscan_buf_t *iscan_cur;
+	wl_iscan_results_t *list;
+	wl_scan_results_t *results;
+	wl_bss_info_t UNALIGNED *bi;
+
+	dhd_iscan_lock();
+
+	iscan_cur = dhd_iscan_result_buf();
+
+	while (iscan_cur) {
+		list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
+		if (!list)
+			break;
+
+		results = (wl_scan_results_t *)&list->results;
+		if (!results)
+			break;
+
+		if (results->version != WL_BSS_INFO_VERSION) {
+			DHD_ISCAN(("%s: results->version %d != WL_BSS_INFO_VERSION\n",
+				__FUNCTION__, results->version));
+			goto done;
+		}
+
+		bi = results->bss_info;
+		for (i = 0; i < results->count; i++) {
+			if (!bi)
+				break;
+
+			DHD_ISCAN(("%s[%2.2d:%2.2d] %X:%X:%X:%X:%X:%X\n",
+				iscan_cur != iscan_skip?"BSS":"bss", l, i,
+				bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2],
+				bi->BSSID.octet[3], bi->BSSID.octet[4], bi->BSSID.octet[5]));
+
+			bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+		}
+		iscan_cur = iscan_cur->next;
+		l++;
+	}
+
+done:
+	dhd_iscan_unlock();
+	return 0;
+}
+
+/*
+* delete disappeared AP from specific scan cache but skip partial list in iscan_skip
+*/
+int
+dhd_iscan_delete_bss(void *dhdp, void *addr, iscan_buf_t *iscan_skip)
+{
+	int i = 0, j = 0, l = 0;
+	iscan_buf_t *iscan_cur;
+	wl_iscan_results_t *list;
+	wl_scan_results_t *results;
+	wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next;
+
+	uchar *s_addr = addr;
+
+	dhd_iscan_lock();
+	DHD_ISCAN(("%s: BSS to remove %X:%X:%X:%X:%X:%X\n",
+		__FUNCTION__, s_addr[0], s_addr[1], s_addr[2],
+		s_addr[3], s_addr[4], s_addr[5]));
+
+	iscan_cur = dhd_iscan_result_buf();
+
+	while (iscan_cur) {
+		if (iscan_cur != iscan_skip) {
+			list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
+			if (!list)
+				break;
+
+			results = (wl_scan_results_t *)&list->results;
+			if (!results)
+				break;
+
+			if (results->version != WL_BSS_INFO_VERSION) {
+				DHD_ERROR(("%s: results->version %d != WL_BSS_INFO_VERSION\n",
+				__FUNCTION__, results->version));
+				goto done;
+			}
+
+			bi = results->bss_info;
+			for (i = 0; i < results->count; i++) {
+				if (!bi)
+					break;
+
+				if (!memcmp(bi->BSSID.octet, addr, ETHER_ADDR_LEN)) {
+					DHD_ISCAN(("%s: Del BSS[%2.2d:%2.2d] %X:%X:%X:%X:%X:%X\n",
+					__FUNCTION__, l, i, bi->BSSID.octet[0],
+					bi->BSSID.octet[1], bi->BSSID.octet[2],
+					bi->BSSID.octet[3], bi->BSSID.octet[4],
+					bi->BSSID.octet[5]));
+
+					bi_new = bi;
+					bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+/*
+					if(bi && bi_new) {
+						bcopy(bi, bi_new, results->buflen -
+						dtoh32(bi_new->length));
+						results->buflen -= dtoh32(bi_new->length);
+					}
+*/
+					results->buflen -= dtoh32(bi_new->length);
+					results->count--;
+
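+					/* Shift the remaining bss_info records
+					 * down over the deleted entry so the
+					 * list stays packed.
+					 */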
+					for (j = i; j < results->count; j++) {
+						if (bi && bi_new) {
+							DHD_ISCAN(("%s: Moved up BSS[%2.2d:%2.2d]"
+							"%X:%X:%X:%X:%X:%X\n",
+							__FUNCTION__, l, j, bi->BSSID.octet[0],
+							bi->BSSID.octet[1], bi->BSSID.octet[2],
+							bi->BSSID.octet[3], bi->BSSID.octet[4],
+							bi->BSSID.octet[5]));
+
+							bi_next = (wl_bss_info_t *)((uintptr)bi +
+								dtoh32(bi->length));
+							bcopy(bi, bi_new, dtoh32(bi->length));
+							bi_new = (wl_bss_info_t *)((uintptr)bi_new +
+								dtoh32(bi_new->length));
+							bi = bi_next;
+						}
+					}
+
+					if (results->count == 0) {
+						/* Prune now empty partial scan list */
+						dhd_iscan_free_buf(dhdp, iscan_cur);
+						goto done;
+					}
+					break;
+				}
+				bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+			}
+		}
+		iscan_cur = iscan_cur->next;
+		l++;
+	}
+
+done:
+	dhd_iscan_unlock();
+	return 0;
+}
+
+int
+dhd_iscan_remove_duplicates(void * dhdp, iscan_buf_t *iscan_cur)
+{
+	int i = 0;
+	wl_iscan_results_t *list;
+	wl_scan_results_t *results;
+	wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next;
+
+	dhd_iscan_lock();
+
+	DHD_ISCAN(("%s: Scan cache before delete\n",
+		__FUNCTION__));
+	dhd_iscan_print_cache(iscan_cur);
+
+	if (!iscan_cur)
+		goto done;
+
+	list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
+	if (!list)
+		goto done;
+
+	results = (wl_scan_results_t *)&list->results;
+	if (!results)
+		goto done;
+
+	if (results->version != WL_BSS_INFO_VERSION) {
+		DHD_ERROR(("%s: results->version %d != WL_BSS_INFO_VERSION\n",
+			__FUNCTION__, results->version));
+		goto done;
+	}
+
+	bi = results->bss_info;
+	for (i = 0; i < results->count; i++) {
+		if (!bi)
+			break;
+
+		DHD_ISCAN(("%s: Find dups for BSS[%2.2d] %X:%X:%X:%X:%X:%X\n",
+			__FUNCTION__, i, bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2],
+			bi->BSSID.octet[3], bi->BSSID.octet[4], bi->BSSID.octet[5]));
+
+		dhd_iscan_delete_bss(dhdp, bi->BSSID.octet, iscan_cur);
+
+		bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+	}
+
+done:
+	DHD_ISCAN(("%s: Scan cache after delete\n", __FUNCTION__));
+	dhd_iscan_print_cache(iscan_cur);
+	dhd_iscan_unlock();
+	return 0;
+}
+
+void
+dhd_iscan_ind_scan_confirm(void *dhdp, bool status)
+{
+
+	dhd_ind_scan_confirm(dhdp, status);
+}
+
+int
+dhd_iscan_request(void * dhdp, uint16 action)
+{
+	int rc;
+	wl_iscan_params_t params;
+	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+	char buf[WLC_IOCTL_SMLEN];
+
+
+	memset(&params, 0, sizeof(wl_iscan_params_t));
+	memcpy(&params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+
+	params.params.bss_type = DOT11_BSSTYPE_ANY;
+	params.params.scan_type = DOT11_SCANTYPE_ACTIVE;
+
+	params.params.nprobes = htod32(-1);
+	params.params.active_time = htod32(-1);
+	params.params.passive_time = htod32(-1);
+	params.params.home_time = htod32(-1);
+	params.params.channel_num = htod32(0);
+
+	params.version = htod32(ISCAN_REQ_VERSION);
+	params.action = htod16(action);
+	params.scan_duration = htod16(0);
+
+	bcm_mkiovar("iscan", (char *)&params, sizeof(wl_iscan_params_t), buf, WLC_IOCTL_SMLEN);
+	rc = dhd_wl_ioctl(dhdp, WLC_SET_VAR, buf, WLC_IOCTL_SMLEN);
+
+	return rc;
+}
+
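+/*
+ * Pull one batch of partial iscan results from the dongle into a newly
+ * allocated node on the iscan chain. An empty batch is freed again;
+ * otherwise duplicate BSS entries are pruned from the older cached nodes.
+ * Returns the dongle's reported iscan status, or -1 on failure.
+ */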
+static int
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
+{
+	wl_iscan_results_t *list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	iscan_buf_t *iscan_cur;
+	int status = -1;
+	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+	int rc;
+
+
+	iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
+	if (!iscan_cur) {
+		DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
+		dhd_iscan_free_buf(dhdp, 0);
+		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+		goto fail;
+	}
+
+	dhd_iscan_lock();
+
+	memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
+		iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+	rc = dhd_wl_ioctl(dhdp, WLC_GET_VAR, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	*scan_count = results->count = dtoh32(results->count);
+	status = dtoh32(list_buf->status);
+
+	dhd_iscan_unlock();
+
+	if (!(*scan_count))
+		dhd_iscan_free_buf(dhdp, iscan_cur);
+	else
+		dhd_iscan_remove_duplicates(dhdp, iscan_cur);
+
+
+fail:
+	return status;
+}
+
+#endif
+
+/* Function to estimate possible DTIM_SKIP value */
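+/*
+ * bcn_li_dtim is chosen so that (bcn_li_dtim * AP DTIM period) stays within
+ * LISTEN_INTERVAL. For example, if LISTEN_INTERVAL were 20 beacon intervals,
+ * an AP DTIM of 3 keeps the default skip of 3 (3 * 3 <= 20), while an AP DTIM
+ * of 7 caps it at 20 / 7 = 2 (illustrative values only).
+ */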
+int dhd_get_dtim_skip(dhd_pub_t *dhd)
+{
+	int bcn_li_dtim;
+	char buf[128];
+	int ret;
+	int dtim_assoc = 0;
+
+	if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
+		bcn_li_dtim = 3;
+	else
+		bcn_li_dtim = dhd->dtim_skip;
+
+	/* Read DTIM value if associated */
+	memset(buf, 0, sizeof(buf));
+	bcm_mkiovar("dtim_assoc", 0, 0, buf, sizeof(buf));
+	if ((ret = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf))) < 0) {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+		bcn_li_dtim = 1;
+		goto exit;
+	}
+	else
+		dtim_assoc = dtoh32(*(int *)buf);
+
+	DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n", \
+			 __FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL));
+
+	/* if not associated just exit */
+	if (dtim_assoc == 0) {
+		goto exit;
+	}
+
+	/* check if sta listen interval fits into AP dtim */
+	if (dtim_assoc > LISTEN_INTERVAL) {
+		/* AP DTIM too big for our Listen Interval: no dtim skipping */
+		bcn_li_dtim = 1;
+		DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", \
+				 __FUNCTION__, dtim_assoc, LISTEN_INTERVAL));
+		goto exit;
+	}
+
+	if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) {
+		/* Reduce dtim_skip so it fits into the STA's Listen Interval */
+		bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc);
+		DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+	}
+
+exit:
+	return bcn_li_dtim;
+}
+
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd)
+{
+	char iovbuf[128];
+	int pfn_enabled = 0;
+	int iov_len = 0;
+	int ret;
+
+	/* Disable pfn */
+	iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) >= 0) {
+		/* clear pfn */
+		iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
+		if (iov_len) {
+			if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0) {
+				DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+			}
+		}
+		else {
+			ret = -1;
+			DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
+		}
+	}
+	else
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+	return ret;
+}
+
+int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+	char iovbuf[128];
+	uint8 bssid[6];
+	int ret = -1;
+
+	if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) {
+		DHD_ERROR(("%s error exit\n", __FUNCTION__));
+		return ret;
+	}
+
+	memset(iovbuf, 0, sizeof(iovbuf));
+
+	/* Check if disassoc to enable pno */
+	if ((pfn_enabled) && \
+		((ret = dhdcdc_set_ioctl(dhd, 0, WLC_GET_BSSID, \
+				 (char *)&bssid, ETHER_ADDR_LEN)) == BCME_NOTASSOCIATED)) {
+		DHD_TRACE(("%s pno enable called in disassoc mode\n", __FUNCTION__));
+	}
+	else if (pfn_enabled) {
+		DHD_ERROR(("%s pno enable called in assoc mode ret=%d\n", \
+			__FUNCTION__, ret));
+		return ret;
+	}
+
+	/* Enable/disable PNO */
+	if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+		if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) {
+			DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		else {
+			dhd->pno_enable = pfn_enabled;
+			DHD_TRACE(("%s set pno as %d\n", __FUNCTION__, dhd->pno_enable));
+		}
+	}
+	else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+
+	return ret;
+}
+
+/* Function to execute combined scan */
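+/*
+ * Program the PFN (preferred network offload) engine with the nssid SSIDs in
+ * ssids_local. scan_fr is the scan interval in seconds (checked against
+ * PNO_SCAN_MIN_FW_SEC/PNO_SCAN_MAX_FW_SEC below); non-zero pno_repeat and
+ * pno_freq_expo_max enable the firmware's adaptive scan adjustment.
+ */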
+int
+dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr, \
+			int pno_repeat, int pno_freq_expo_max)
+{
+	int err = -1;
+	char iovbuf[128];
+	int k, i;
+	wl_pfn_param_t pfn_param;
+	wl_pfn_t	pfn_element;
+
+	DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr));
+
+	if ((!dhd) || (!ssids_local)) {
+		DHD_ERROR(("%s error exit\n", __FUNCTION__));
+		return err;
+	}
+
+	/* Check for broadcast ssid */
+	for (k = 0; k < nssid; k++) {
+		if (!ssids_local[k].SSID_len) {
+			DHD_ERROR(("%d: Broadcast SSID is illegal for PNO setting\n", k));
+			return err;
+		}
+	}
+/* #define  PNO_DUMP 1 */
+#ifdef PNO_DUMP
+	{
+		int j;
+		for (j = 0; j < nssid; j++) {
+			DHD_ERROR(("%d: scan  for  %s size =%d\n", j,
+				ssids_local[j].SSID, ssids_local[j].SSID_len));
+		}
+	}
+#endif /* PNO_DUMP */
+
+	/* clean up everything */
+	if  ((err = dhd_pno_clean(dhd)) < 0) {
+		DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err));
+		return err;
+	}
+	memset(&pfn_param, 0, sizeof(pfn_param));
+	memset(&pfn_element, 0, sizeof(pfn_element));
+
+	/* set pfn parameters */
+	pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT));
+
+	/* check and set extra pno params */
+	if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) {
+		pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+		pfn_param.repeat_scan = htod32(pno_repeat);
+		pfn_param.max_freq_adjust = htod32(pno_freq_expo_max);
+	}
+
+	/* set up pno scan fr */
+	if (scan_fr  != 0)
+		pfn_param.scan_freq = htod32(scan_fr);
+
+	if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) {
+		DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
+		return err;
+	}
+	if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) {
+		DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+		return err;
+	}
+
+	bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
+	dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+
+	/* set all pfn ssid */
+	for (i = 0; i < nssid; i++) {
+
+		pfn_element.bss_type = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+		pfn_element.auth = (DOT11_OPEN_SYSTEM);
+		pfn_element.infra = htod32(1);
+
+		memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len);
+		pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
+
+		if ((err =
+		bcm_mkiovar("pfn_add", (char *)&pfn_element,
+			sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+			if ((err =
+			dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) {
+				DHD_ERROR(("%s failed for i=%d error=%d\n",
+					__FUNCTION__, i, err));
+				return err;
+			}
+			else
+				DHD_ERROR(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n", \
+					__FUNCTION__, pfn_param.scan_freq, \
+					pfn_param.repeat_scan, pfn_param.max_freq_adjust));
+		}
+		else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err));
+	}
+
+	/* Enable PNO */
+	/* dhd_pno_enable(dhd, 1); */
+	return err;
+}
+
+int dhd_pno_get_status(dhd_pub_t *dhd)
+{
+	int ret = -1;
+
+	if (!dhd)
+		return ret;
+	else
+		return (dhd->pno_enable);
+}
+
+#endif /* PNO_SUPPORT */
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on)
+{
+	char buf[256];
+	char *buf_ptr = buf;
+	wl_keep_alive_pkt_t keep_alive_pkt;
+	char * str;
+	int str_len, buf_len;
+	int res = 0;
+	int keep_alive_period = KEEP_ALIVE_PERIOD; /* in ms */
+
+	DHD_TRACE(("%s: ka:%d\n", __FUNCTION__, ka_on));
+
+	if (ka_on) { /* on suspend */
+		keep_alive_pkt.period_msec = keep_alive_period;
+
+	} else {
+		/* on resume, turn off keep_alive packets  */
+		keep_alive_pkt.period_msec = 0;
+	}
+
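+	/* The iovar buffer is laid out as: the NUL-terminated name "keep_alive",
+	 * followed by the fixed wl_keep_alive_pkt_t header (period and payload
+	 * length), followed by the payload bytes themselves (NULL_PKT_STR here).
+	 */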
+	/* IOC var name  */
+	str = "keep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[str_len] = '\0';
+	buf_len = str_len + 1;
+
+	/* point to the IOCTL payload just past the var name (including its terminating NUL) */
+	buf_ptr += buf_len;
+
+	/* copy Keep-alive attributes from local var keep_alive_pkt */
+	str = NULL_PKT_STR;
+	keep_alive_pkt.len_bytes = strlen(str);
+
+	memcpy(buf_ptr, &keep_alive_pkt, WL_KEEP_ALIVE_FIXED_LEN);
+	buf_ptr += WL_KEEP_ALIVE_FIXED_LEN;
+
+	/* copy packet data */
+	memcpy(buf_ptr, str, keep_alive_pkt.len_bytes);
+	buf_len += (WL_KEEP_ALIVE_FIXED_LEN + keep_alive_pkt.len_bytes);
+
+	res = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+	return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+
+#if defined(CSCAN)
+
+/* Android ComboSCAN support */
+/*
+ *  data parsing from ComboScan tlv list
+*/
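+/*
+ * Each element is encoded as a one-byte type (the expected 'token') followed
+ * by a 1-, 2- or 4-byte value ('input_size'); multi-byte values are converted
+ * to dongle byte order before being stored into 'dst'.
+ */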
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+                     int input_size, int *bytes_left)
+{
+	char* str = *list_str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* Clean all dest bytes */
+	memset(dst, 0, dst_size);
+	while (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			/* copy into an aligned temp, convert byte order, then store */
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ *  channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+                             int channel_num, int *bytes_left)
+{
+	char* str = *list_str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+			*list_str = str;
+			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* All channels */
+			channel_list[idx] = 0x0;
+		}
+		else {
+			channel_list[idx] = (uint16)str[0];
+			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
+		}
+		*bytes_left -= 1;
+		str += 1;
+
+		if (idx++ > 255) {
+			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/*
+ *  SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+	char* str =  *list_str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
+		}
+		else if (str[0] <= DOT11_MAX_SSID_LEN) {
+			/* Get proper SSID size */
+			ssid[idx].SSID_len = str[0];
+			*bytes_left -= 1;
+			str += 1;
+
+			/* Get SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s SSID len=%d exceeds remaining bytes=%d\n",
+					__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return -1;
+			}
+
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		}
+		else {
+			DHD_ERROR(("### SSID size %d exceeds %d\n", str[0], DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (idx++ >  max) {
+			DHD_ERROR(("%s number of SSIDs exceeds max %d\n", __FUNCTION__, max));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx.  max specifies the size of the ssid array.  Parses SSIDs
+ * and returns the updated idx; if the returned idx >= max, not all SSIDs
+ * fit and the excess were not copied.  Returns -1 on an empty string or
+ * an SSID that is too long.
+ */
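+/*
+ * For example (format sketch only; the actual tag strings come from wl_iw.h),
+ * an input such as "ssidA,ssidB,<GET_CHANNEL tag>..." fills ssid[idx] with
+ * "ssidA" and ssid[idx+1] with "ssidB", then leaves *list_str pointing just
+ * past the GET_CHANNEL tag and returns the updated index.
+ */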
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+	char* str, *ptr;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	for (str = *list_str; str != NULL; str = ptr) {
+
+		/* check for next TAG */
+		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+			*list_str	 = str + strlen(GET_CHANNEL);
+			return idx;
+		}
+
+		if ((ptr = strchr(str, ',')) != NULL) {
+			*ptr++ = '\0';
+		}
+
+		if (strlen(str) > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (strlen(str) == 0)
+			ssid[idx].SSID_len = 0;
+
+		if (idx < max) {
+			strcpy((char*)ssid[idx].SSID, str);
+			ssid[idx].SSID_len = strlen(str);
+		}
+		idx++;
+	}
+	return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
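+/*
+ * The list is expected to be numeric channel values separated by spaces or
+ * commas (e.g. "1,6,11"), terminated by the GET_NPROBE tag; parsing stops at
+ * that tag and the number of channels read is returned.
+ */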
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+	int num;
+	int val;
+	char* str;
+	char* endptr = NULL;
+
+	if ((list_str == NULL)||(*list_str == NULL))
+		return -1;
+
+	str = *list_str;
+	num = 0;
+	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+		val = (int)strtoul(str, &endptr, 0);
+		if (endptr == str) {
+			printf("could not parse channel number starting at"
+				" substring \"%s\" in list:\n%s\n",
+				str, *list_str);
+			return -1;
+		}
+		str = endptr + strspn(endptr, " ,");
+
+		if (num == channel_num) {
+			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+				channel_num, *list_str));
+			return -1;
+		}
+
+		channel_list[num++] = (uint16)val;
+	}
+	*list_str = str;
+	return num;
+}
+
+#endif
diff --git a/drivers/net/wireless/bcm4329/dhd_custom_gpio.c b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c
new file mode 100644
index 0000000..4d32863
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c
@@ -0,0 +1,272 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2010, Broadcom Corporation
+* 
+*      Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c,v 1.1.4.8.4.4 2011/01/20 20:23:09 Exp $
+*/
+
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#ifdef CUSTOMER_HW
+extern  void bcm_wlan_power_off(int);
+extern  void bcm_wlan_power_on(int);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+int wifi_set_carddetect(int on);
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#endif
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC)  */
+
+#ifdef CUSTOMER_HW3
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1; /* GG 19 */
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
+{
+	int  host_oob_irq = 0;
+
+#ifdef CUSTOMER_HW2
+	host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
+
+#else /* for NOT  CUSTOMER_HW2 */
+#if defined(CUSTOM_OOB_GPIO_NUM)
+	if (dhd_oob_gpio_num < 0) {
+		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+	}
+#endif
+
+	if (dhd_oob_gpio_num < 0) {
+		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+			__FUNCTION__));
+		return (dhd_oob_gpio_num);
+	}
+
+	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+	         __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW
+	host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
+#elif defined CUSTOMER_HW3
+	gpio_request(dhd_oob_gpio_num, "oob irq");
+	host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+	gpio_direction_input(dhd_oob_gpio_num);
+#endif /* CUSTOMER_HW */
+#endif /* CUSTOMER_HW2 */
+
+	return (host_oob_irq);
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Customer function to control hw specific wlan gpios */
+void
+dhd_customer_gpio_wlan_ctrl(int onoff)
+{
+	switch (onoff) {
+		case WLAN_RESET_OFF:
+			WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_off(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+			wifi_set_power(0, 0);
+#endif
+			WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+		break;
+
+		case WLAN_RESET_ON:
+			WL_TRACE(("%s: call customer specific GPIO to remove WLAN RESET\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_on(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+			wifi_set_power(1, 0);
+#endif
+			WL_ERROR(("=========== WLAN coming back to life ========\n"));
+		break;
+
+		case WLAN_POWER_OFF:
+			WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_off(1);
+#endif /* CUSTOMER_HW */
+		break;
+
+		case WLAN_POWER_ON:
+			WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_on(1);
+			/* Let customer power stabilize */
+			OSL_DELAY(50);
+#endif /* CUSTOMER_HW */
+		break;
+	}
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(unsigned char *buf)
+{
+	int ret = 0;
+
+	WL_TRACE(("%s Enter\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+
+	/* Customer access to MAC address stored outside of DHD driver */
+#ifdef CUSTOMER_HW2
+	ret = wifi_get_mac_addr(buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+	/* EXAMPLE code */
+	{
+		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+	}
+#endif /* EXAMPLE_GET_MAC */
+
+	return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+	{"",   "XY", 4},  /* universal */
+	{"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+	{"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+	{"EU", "EU", 5},  /* European union countries */
+	{"AT", "EU", 5},
+	{"BE", "EU", 5},
+	{"BG", "EU", 5},
+	{"CY", "EU", 5},
+	{"CZ", "EU", 5},
+	{"DK", "EU", 5},
+	{"EE", "EU", 5},
+	{"FI", "EU", 5},
+	{"FR", "EU", 5},
+	{"DE", "EU", 5},
+	{"GR", "EU", 5},
+	{"HU", "EU", 5},
+	{"IE", "EU", 5},
+	{"IT", "EU", 5},
+	{"LV", "EU", 5},
+	{"LI", "EU", 5},
+	{"LT", "EU", 5},
+	{"LU", "EU", 5},
+	{"MT", "EU", 5},
+	{"NL", "EU", 5},
+	{"PL", "EU", 5},
+	{"PT", "EU", 5},
+	{"RO", "EU", 5},
+	{"SK", "EU", 5},
+	{"SI", "EU", 5},
+	{"ES", "EU", 5},
+	{"SE", "EU", 5},
+	{"GB", "EU", 5},  /* input ISO "GB" to : EU regrev 05 */
+	{"IL", "IL", 0},
+	{"CH", "CH", 0},
+	{"TR", "TR", 0},
+	{"NO", "NO", 0},
+	{"KR", "XY", 3},
+	{"AU", "XY", 3},
+	{"CN", "XY", 3},  /* input ISO "CN" to : XY regrev 03 */
+	{"TW", "XY", 3},
+	{"AR", "XY", 3},
+	{"MX", "XY", 3}
+#endif /* EXAMPLE_TABLE */
+};
+
+
+/* Customized Locale convertor
+*  input : ISO 3166-1 country abbreviation
+*  output: customized cspec
+*/
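+/* For example, on the table-lookup path (non-CUSTOMER_HW2) with EXAMPLE_TABLE
+ * compiled in, an input of "GB" is translated to ccode "EU" with regrev 5,
+ * and an unknown ISO code falls back to the first table entry (the universal
+ * "XY" locale).
+ */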
+void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
+{
+#ifdef CUSTOMER_HW2
+	struct cntry_locales_custom *cloc_ptr;
+
+	if (!cspec)
+		return;
+
+	cloc_ptr = wifi_get_country_code(country_iso_code);
+	if (cloc_ptr) {
+		strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+		cspec->rev = cloc_ptr->custom_locale_rev;
+	}
+	return;
+#else
+	int size, i;
+
+	size = ARRAYSIZE(translate_custom_table);
+
+	if (cspec == 0)
+		return;
+
+	if (size == 0)
+		return;
+
+	for (i = 0; i < size; i++) {
+		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+			memcpy(cspec->ccode, translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+			cspec->rev = translate_custom_table[i].custom_locale_rev;
+			return;
+		}
+	}
+	memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+	cspec->rev = translate_custom_table[0].custom_locale_rev;
+	return;
+#endif
+}
diff --git a/drivers/net/wireless/bcm4329/dhd_dbg.h b/drivers/net/wireless/bcm4329/dhd_dbg.h
new file mode 100644
index 0000000..b48c1d7
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_dbg.h
@@ -0,0 +1,100 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h,v 1.5.6.2.4.2.14.10 2010/05/21 21:49:38 Exp $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
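+/*
+ * The message macros below take their printf-style arguments wrapped in an
+ * extra set of parentheses, e.g. DHD_ERROR(("%s: failed, err %d\n",
+ * __FUNCTION__, err)); so the whole argument list can be forwarded to printf
+ * as a single macro parameter.
+ */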
+#ifdef DHD_DEBUG
+
+#define DHD_ERROR(args)	       do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \
+								printf args;} while (0)
+#define DHD_TRACE(args)		do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args)		do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args)		do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args)		do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args)		do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args)		do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args)		do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args)		do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args)		do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args)		do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args)		do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args)		do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+
+#define DHD_ERROR_ON()		(dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON()		(dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON()		(dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON()		(dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON()		(dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON()		(dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON()		(dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON()		(dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON()		(dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON()		(dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON()		(dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON()		(dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON()		(dhd_msg_level & DHD_ISCAN_VAL)
+
+#else /* DHD_DEBUG */
+
+#define DHD_ERROR(args)    	do {if (net_ratelimit()) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+
+#define DHD_ERROR_ON()		0
+#define DHD_TRACE_ON()		0
+#define DHD_INFO_ON()		0
+#define DHD_DATA_ON()		0
+#define DHD_CTL_ON()		0
+#define DHD_TIMER_ON()		0
+#define DHD_HDRS_ON()		0
+#define DHD_BYTES_ON()		0
+#define DHD_INTR_ON()		0
+#define DHD_GLOM_ON()		0
+#define DHD_EVENT_ON()		0
+#define DHD_BTA_ON()		0
+#define DHD_ISCAN_ON()		0
+#endif /* DHD_DEBUG */
+
+#define DHD_LOG(args)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_linux.c b/drivers/net/wireless/bcm4329/dhd_linux.c
new file mode 100644
index 0000000..f1a25fe
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_linux.c
@@ -0,0 +1,3399 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104.4.40 2011/02/03 19:55:18 Exp $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/inetdevice.h>
+#include <linux/mutex.h>
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <wl_iw.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef CUSTOMER_HW2
+#include <linux/platform_device.h>
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#include <linux/wlan_plat.h>
+static struct wifi_platform_data *wifi_control_data = NULL;
+#endif
+struct semaphore wifi_control_sem;
+
+static struct resource *wifi_irqres = NULL;
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr)
+{
+	if (wifi_irqres) {
+		*irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
+		return (int)wifi_irqres->start;
+	}
+#ifdef CUSTOM_OOB_GPIO_NUM
+	return CUSTOM_OOB_GPIO_NUM;
+#else
+	return -1;
+#endif
+}
+
+int wifi_set_carddetect(int on)
+{
+	printk("%s = %d\n", __FUNCTION__, on);
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+	if (wifi_control_data && wifi_control_data->set_carddetect) {
+		wifi_control_data->set_carddetect(on);
+	}
+#endif
+	return 0;
+}
+
+int wifi_set_power(int on, unsigned long msec)
+{
+	printk("%s = %d\n", __FUNCTION__, on);
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+	if (wifi_control_data && wifi_control_data->set_power) {
+		wifi_control_data->set_power(on);
+	}
+#endif
+	if (msec)
+		mdelay(msec);
+	return 0;
+}
+
+int wifi_set_reset(int on, unsigned long msec)
+{
+	DHD_TRACE(("%s = %d\n", __FUNCTION__, on));
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+	if (wifi_control_data && wifi_control_data->set_reset) {
+		wifi_control_data->set_reset(on);
+	}
+#endif
+	if (msec)
+		mdelay(msec);
+	return 0;
+}
+
+int wifi_get_mac_addr(unsigned char *buf)
+{
+	DHD_TRACE(("%s\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+	if (wifi_control_data && wifi_control_data->get_mac_addr) {
+		return wifi_control_data->get_mac_addr(buf);
+	}
+#endif
+	return -EOPNOTSUPP;
+}
+
+void *wifi_get_country_code(char *ccode)
+{
+	DHD_TRACE(("%s\n", __FUNCTION__));
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+	if (!ccode)
+		return NULL;
+	if (wifi_control_data && wifi_control_data->get_country_code) {
+		return wifi_control_data->get_country_code(ccode);
+	}
+#endif
+	return NULL;
+}
+
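+/*
+ * Platform driver glue: wifi_add_dev() registers the "bcm4329_wlan" platform
+ * driver; when the matching device is present, wifi_probe() fetches the
+ * "bcm4329_wlan_irq" resource, powers the chip on, asserts card detect so the
+ * host controller rescans the slot, and releases wifi_control_sem for whoever
+ * is waiting on it.  wifi_remove() undoes this on unregister.
+ */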
+static int wifi_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+	struct wifi_platform_data *wifi_ctrl =
+		(struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	wifi_control_data = wifi_ctrl;
+#endif
+
+	DHD_TRACE(("## %s\n", __FUNCTION__));
+	wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq");
+
+	wifi_set_power(1, 0);	/* Power On */
+	wifi_set_carddetect(1);	/* CardDetect (0->1) */
+
+	up(&wifi_control_sem);
+	return 0;
+}
+
+static int wifi_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+	struct wifi_platform_data *wifi_ctrl =
+		(struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	wifi_control_data = wifi_ctrl;
+#endif
+	DHD_TRACE(("## %s\n", __FUNCTION__));
+	wifi_set_power(0, 0);	/* Power Off */
+	wifi_set_carddetect(0);	/* CardDetect (1->0) */
+
+	up(&wifi_control_sem);
+	return 0;
+}
+
+static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+static int wifi_resume(struct platform_device *pdev)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static struct platform_driver wifi_device = {
+	.probe          = wifi_probe,
+	.remove         = wifi_remove,
+	.suspend        = wifi_suspend,
+	.resume         = wifi_resume,
+	.driver         = {
+	.name   = "bcm4329_wlan",
+	}
+};
+
+int wifi_add_dev(void)
+{
+	DHD_TRACE(("## Calling platform_driver_register\n"));
+	return platform_driver_register(&wifi_device);
+}
+
+void wifi_del_dev(void)
+{
+	DHD_TRACE(("## Unregister platform_driver_register\n"));
+	platform_driver_unregister(&wifi_device);
+}
+#endif /* defined(CUSTOMER_HW2) */
+
+static int dhd_device_event(struct notifier_block *this, unsigned long event,
+				void *ptr);
+
+static struct notifier_block dhd_notifier = {
+	.notifier_call = dhd_device_event
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+	return "";
+}
+#endif	/* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(CONFIG_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+/* Interface control information */
+typedef struct dhd_if {
+	struct dhd_info *info;			/* back pointer to dhd_info */
+	/* OS/stack specifics */
+	struct net_device *net;
+	struct net_device_stats stats;
+	int 			idx;			/* iface idx in dongle */
+	int 			state;			/* interface state */
+	uint 			subunit;		/* subunit */
+	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	bool			attached;		/* Delayed attachment when unset */
+	bool			txflowcontrol;	/* Per interface flow control indicator */
+	char			name[IFNAMSIZ+1]; /* linux interface name */
+} dhd_if_t;
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(CONFIG_WIRELESS_EXT)
+	wl_iw_t		iw;		/* wireless extensions state (must be first) */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	dhd_pub_t pub;
+
+	/* OS/stack specifics */
+	dhd_if_t *iflist[DHD_MAX_IFS];
+
+	struct mutex proto_sem;
+	wait_queue_head_t ioctl_resp_wait;
+	struct timer_list timer;
+	bool wd_timer_valid;
+	struct tasklet_struct tasklet;
+	spinlock_t	sdlock;
+	spinlock_t	txqlock;
+	spinlock_t	dhd_lock;
+
+	/* Thread based operation */
+	bool threads_only;
+	struct mutex sdsem;
+	long watchdog_pid;
+	struct semaphore watchdog_sem;
+	struct completion watchdog_exited;
+	long dpc_pid;
+	struct semaphore dpc_sem;
+	struct completion dpc_exited;
+
+	/* Wakelocks */
+#ifdef CONFIG_HAS_WAKELOCK
+	struct wake_lock wl_wifi;   /* Wifi wakelock */
+	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+#endif
+	spinlock_t wl_lock;
+	int wl_count;
+	int wl_packet;
+
+	int hang_was_sent; /* flag that the HANG message was sent at least once */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	struct mutex wl_start_lock; /* mutex when START called to prevent any other Linux calls */
+#endif
+	/* Thread to issue ioctl for multicast */
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+	bool set_multicast;
+	bool set_macaddress;
+	struct ether_addr macvalue;
+	wait_queue_head_t ctrl_wait;
+	atomic_t pend_8021x_cnt;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+} dhd_info_t;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+struct semaphore dhd_registration_sem;
+#define DHD_REGISTRATION_TIMEOUT  12000  /* msec : allowed time to finish dhd registration */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+/* Spawn a thread for system ioctls (set mac, set mcast) */
+uint dhd_sysioc = TRUE;
+module_param(dhd_sysioc, uint, 0);
+
+/* Watchdog interval */
+uint dhd_watchdog_ms = 10;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#ifdef DHD_DEBUG
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0);
+#endif /* DHD_DEBUG */
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+uint dhd_arp_mode = 0xb;
+module_param(dhd_arp_mode, uint, 0);
+
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+
+/*  Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 1);
+
+/* Watchdog thread priority, -1 to use kernel timer */
+int dhd_watchdog_prio = 97;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+int dhd_dpc_prio = 98;
+module_param(dhd_dpc_prio, int, 0);
+
+/* Dongle memory size override */
+extern int dhd_dongle_memsize;
+module_param(dhd_dongle_memsize, int, 0);
+
+/* Control fw roaming */
+#ifdef CUSTOMER_HW2
+uint dhd_roam = 0;
+#else
+uint dhd_roam = 1;
+#endif
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ];
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE()	(!in_atomic())
+#else
+#define BLOCKABLE()	(!in_interrupt())
+#endif
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE        "drivers/net/wireless/bcm4329"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#else
+#define DHD_COMPILED
+#endif
+
+static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+#ifdef DHD_DEBUG
+"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
+#endif
+;
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+                             wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+	int ret = NOTIFY_DONE;
+
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		dhd_mmc_suspend = TRUE;
+		ret = NOTIFY_OK;
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		dhd_mmc_suspend = FALSE;
+		ret = NOTIFY_OK;
+		break;
+	}
+	smp_mb();
+	return ret;
+}
+
+static struct notifier_block dhd_sleep_pm_notifier = {
+	.notifier_call = dhd_sleep_pm_callback,
+	.priority = 0
+};
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	DHD_TRACE(("%s: %d\n", __FUNCTION__, value));
+	/* 1 - Enable packet filter, only allow unicast packet to send up */
+	/* 0 - Disable packet filter */
+	if (dhd_pkt_filter_enable) {
+		int i;
+
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+					value, dhd_master_mode);
+		}
+	}
+#endif
+}
+
+
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+	int power_mode = PM_MAX;
+	/* wl_pkt_filter_enable_t	enable_parm; */
+	char iovbuf[32];
+	int bcn_li_dtim = 3;
+#ifdef CUSTOMER_HW2
+	uint roamvar = 1;
+#endif /* CUSTOMER_HW2 */
+
+	DHD_TRACE(("%s: enter, value = %d in_suspend = %d\n",
+			__FUNCTION__, value, dhd->in_suspend));
+
+	if (dhd && dhd->up) {
+		if (value && dhd->in_suspend) {
+
+			/* Kernel suspended */
+			DHD_TRACE(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+			dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM,
+				(char *)&power_mode, sizeof(power_mode));
+
+			/* Enable packet filter, only allow unicast packet to send up */
+			dhd_set_packet_filter(1, dhd);
+
+			/* If DTIM skip is left at its default, force the dongle to wake
+			 * on every third DTIM for better power saving.
+			 * Note: the side effect is a chance to miss BC/MC packets.
+			 */
+			bcn_li_dtim = dhd_get_dtim_skip(dhd);
+			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+				4, iovbuf, sizeof(iovbuf));
+			dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#ifdef CUSTOMER_HW2
+			/* Disable built-in roaming during suspend */
+			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+			dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#endif /* CUSTOMER_HW2 */
+
+		} else {
+
+			/* Kernel resumed  */
+			DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+			power_mode = PM_FAST;
+			dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode,
+				sizeof(power_mode));
+
+			/* disable pkt filter */
+			dhd_set_packet_filter(0, dhd);
+
+			/* restore pre-suspend setting for dtim_skip */
+			bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
+				4, iovbuf, sizeof(iovbuf));
+
+			dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#ifdef CUSTOMER_HW2
+			roamvar = dhd_roam;
+			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+			dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+#endif /* CUSTOMER_HW2 */
+		}
+	}
+
+	return 0;
+}
+
+static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
+{
+	dhd_pub_t *dhdp = &dhd->pub;
+
+	dhd_os_wake_lock(dhdp);
+	dhd_os_proto_block(dhdp);
+	/* Set flag when early suspend was called */
+	dhdp->in_suspend = val;
+	if (!dhdp->suspend_disable_flag)
+		dhd_set_suspend(val, dhdp);
+	dhd_os_proto_unblock(dhdp);
+	dhd_os_wake_unlock(dhdp);
+}
+
+static void dhd_early_suspend(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 1);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 0);
+}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+/*
+ * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
+ *
+ *      dhd_timeout_start(&tmo, usec);
+ *      while (!dhd_timeout_expired(&tmo))
+ *              if (poll_something())
+ *                      break;
+ *      if (dhd_timeout_expired(&tmo))
+ *              fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+	tmo->limit = usec;
+	tmo->increment = 0;
+	tmo->elapsed = 0;
+	tmo->tick = 1000000 / HZ;
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Does nothing the first call */
+	if (tmo->increment == 0) {
+		tmo->increment = 1;
+		return 0;
+	}
+
+	if (tmo->elapsed >= tmo->limit)
+		return 1;
+
+	/* Add the delay that's about to take place */
+	tmo->elapsed += tmo->increment;
+
+	if (tmo->increment < tmo->tick) {
+		OSL_DELAY(tmo->increment);
+		tmo->increment *= 2;
+		if (tmo->increment > tmo->tick)
+			tmo->increment = tmo->tick;
+	} else {
+		wait_queue_head_t delay_wait;
+		DECLARE_WAITQUEUE(wait, current);
+		int pending;
+		init_waitqueue_head(&delay_wait);
+		add_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+		pending = signal_pending(current);
+		remove_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		if (pending)
+			return 1;	/* Interrupted */
+	}
+
+	return 0;
+}
+
+static int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+	int i = 0;
+
+	ASSERT(dhd);
+	while (i < DHD_MAX_IFS) {
+		if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
+			return i;
+		i++;
+	}
+
+	return DHD_BAD_IF;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (name == NULL || *name == '\0')
+		return 0;
+
+	while (--i > 0)
+		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+				break;
+
+	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+	return i;	/* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return "<if_bad>";
+	}
+
+	if (dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+		return "<if_null>";
+	}
+
+	if (dhd->iflist[ifidx]->net)
+		return dhd->iflist[ifidx]->net->name;
+
+	return "<if_none>";
+}
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+	struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	struct netdev_hw_addr *ha;
+#else
+	struct dev_mc_list *mclist;
+#endif
+	uint32 allmulti, cnt;
+
+	wl_ioctl_t ioc;
+	char *buf, *bufp;
+	uint buflen;
+	int ret;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+
+	NETIF_ADDR_LOCK(dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	cnt = netdev_mc_count(dev);
+#else
+	cnt = dev->mc_count;
+#endif
+	NETIF_ADDR_UNLOCK(dev);
+
+	/* Determine initial value of allmulti flag */
+	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+	/* Send down the multicast list first. */
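+	/* The "mcast_list" iovar buffer is laid out as: the NUL-terminated name,
+	 * a 32-bit little-endian address count, then cnt 6-byte MAC addresses.
+	 */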
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+		           dhd_ifname(&dhd->pub, ifidx), cnt));
+		return;
+	}
+
+	strcpy(bufp, "mcast_list");
+	bufp += strlen("mcast_list") + 1;
+
+	cnt = htol32(cnt);
+	memcpy(bufp, &cnt, sizeof(cnt));
+	bufp += sizeof(cnt);
+
+	NETIF_ADDR_LOCK(dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	netdev_for_each_mc_addr(ha, dev) {
+		if (!cnt)
+			break;
+		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+		cnt--;
+	}
+#else
+	for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) {
+		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+	}
+#endif
+	NETIF_ADDR_UNLOCK(dev);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		allmulti = cnt ? TRUE : allmulti;
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but might be modified above to be turned on if we
+	 * were trying to set some addresses and dongle rejected it...
+	 */
+
+	buflen = sizeof("allmulti") + sizeof(allmulti);
+	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+		return;
+	}
+	allmulti = htol32(allmulti);
+
+	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+		           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+		MFREE(dhd->pub.osh, buf, buflen);
+		return;
+	}
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set allmulti %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+	allmulti = htol32(allmulti);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_PROMISC;
+	ioc.buf = &allmulti;
+	ioc.len = sizeof(allmulti);
+	ioc.set = TRUE;
+
+	ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set promisc %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+}
+
+static int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
+{
+	char buf[32];
+	wl_ioctl_t ioc;
+	int ret;
+
+	DHD_TRACE(("%s enter\n", __FUNCTION__));
+	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+		return -1;
+	}
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = 32;
+	ioc.set = TRUE;
+
+	ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+	} else {
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+/* semaphore that the soft AP CODE waits on */
+extern struct semaphore ap_eth_sema;
+#endif
+
+static void
+dhd_op_if(dhd_if_t *ifp)
+{
+	dhd_info_t *dhd;
+	int ret = 0, err = 0;
+#ifdef SOFTAP
+	unsigned long flags;
+#endif
+
+	ASSERT(ifp && ifp->info && ifp->idx);	/* Virtual interfaces only */
+
+	dhd = ifp->info;
+
+	DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
+
+	switch (ifp->state) {
+	case WLC_E_IF_ADD:
+		/*
+		 * Delete the existing interface before overwriting it
+		 * in case we missed the WLC_E_IF_DEL event.
+		 */
+		if (ifp->net != NULL) {
+			DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
+			 __FUNCTION__, ifp->net->name));
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			free_netdev(ifp->net);
+		}
+		/* Allocate etherdev, including space for private structure */
+		if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
+			DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+			ret = -ENOMEM;
+		}
+		if (ret == 0) {
+			strcpy(ifp->net->name, ifp->name);
+			memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
+			if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
+				DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
+					__FUNCTION__, err));
+				ret = -EOPNOTSUPP;
+			} else {
+#ifdef SOFTAP
+				flags = dhd_os_spin_lock(&dhd->pub);
+				/* save ptr to wl0.1 netdev for use in wl_iw.c  */
+				ap_net_dev = ifp->net;
+				 /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
+				up(&ap_eth_sema);
+				dhd_os_spin_unlock(&dhd->pub, flags);
+#endif
+				DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
+					current->pid, ifp->net->name));
+				ifp->state = 0;
+			}
+		}
+		break;
+	case WLC_E_IF_DEL:
+		if (ifp->net != NULL) {
+			DHD_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n", __FUNCTION__));
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			ret = DHD_DEL_IF;	/* Make sure the free_netdev() is called */
+		}
+		break;
+	default:
+		DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
+		ASSERT(!ifp->state);
+		break;
+	}
+
+	if (ret < 0) {
+#ifdef SOFTAP
+		flags = dhd_os_spin_lock(&dhd->pub);
+		if (ifp->net == ap_net_dev)
+			ap_net_dev = NULL;     /* NULL the SOFTAP global before freeing ifp */
+		dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /*  SOFTAP */
+		if (ifp->net) {
+			free_netdev(ifp->net);
+		}
+		dhd->iflist[ifp->idx] = NULL;
+		MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+	}
+}
+
+static int
+_dhd_sysioc_thread(void *data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	int i;
+#ifdef SOFTAP
+	bool in_ap = FALSE;
+	unsigned long flags;
+#endif
+
+	DAEMONIZE("dhd_sysioc");
+
+	while (down_interruptible(&dhd->sysioc_sem) == 0) {
+		dhd_os_start_lock(&dhd->pub);
+		dhd_os_wake_lock(&dhd->pub);
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				DHD_TRACE(("%s: interface %d\n",__FUNCTION__, i));
+#ifdef SOFTAP
+				flags = dhd_os_spin_lock(&dhd->pub);
+				in_ap = (ap_net_dev != NULL);
+				dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+				if (dhd->iflist[i]->state)
+					dhd_op_if(dhd->iflist[i]);
+#ifdef SOFTAP
+				if (dhd->iflist[i] == NULL) {
+					DHD_TRACE(("%s: interface %d just been removed!\n\n", __FUNCTION__, i));
+					continue;
+				}
+
+				if (in_ap && dhd->set_macaddress) {
+					DHD_TRACE(("attempt to set MAC for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name));
+					dhd->set_macaddress = FALSE;
+					continue;
+				}
+
+				if (in_ap && dhd->set_multicast)  {
+					DHD_TRACE(("attempt to set MULTICAST list for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name));
+					dhd->set_multicast = FALSE;
+					continue;
+				}
+#endif /* SOFTAP */
+				if (dhd->set_multicast) {
+					dhd->set_multicast = FALSE;
+					_dhd_set_multicast_list(dhd, i);
+				}
+				if (dhd->set_macaddress) {
+					dhd->set_macaddress = FALSE;
+					_dhd_set_mac_address(dhd, i, &dhd->macvalue);
+				}
+			}
+		}
+		dhd_os_wake_unlock(&dhd->pub);
+		dhd_os_start_unlock(&dhd->pub);
+	}
+	DHD_TRACE(("%s: stopped\n",__FUNCTION__));
+	complete_and_exit(&dhd->sysioc_exited, 0);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n",__FUNCTION__));
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return -1;
+
+	ASSERT(dhd->sysioc_pid >= 0);
+	memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+	dhd->set_macaddress = TRUE;
+	up(&dhd->sysioc_sem);
+
+	return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n",__FUNCTION__));
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	ASSERT(dhd->sysioc_pid >= 0);
+	dhd->set_multicast = TRUE;
+	up(&dhd->sysioc_sem);
+}
+
+int
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+
+	/* Reject if down */
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		return -ENODEV;
+	}
+
+	/* Update multicast statistic */
+	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+		struct ether_header *eh = (struct ether_header *)pktdata;
+
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			dhdp->tx_multicast++;
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+			atomic_inc(&dhd->pend_8021x_cnt);
+	}
+
+	/* Look into the packet and update the packet priority */
+	if ((PKTPRIO(pktbuf) == 0))
+		pktsetprio(pktbuf, FALSE);
+
+	/* If the protocol uses a data header, apply it */
+	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+	/* Use bus module to send data frame */
+#ifdef BCMDBUS
+	ret = dbus_send_pkt(dhdp->dbus, pktbuf, NULL /* pktinfo */);
+#else
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMDBUS */
+
+	return ret;
+}
+
+static int
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	void *pktbuf;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_wake_lock(&dhd->pub);
+
+	/* Reject if down */
+	if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
+		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d\n",
+			 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+		netif_stop_queue(net);
+		/* Send Event when bus down detected during data session */
+		if (dhd->pub.busstate == DHD_BUS_DOWN)  {
+			DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__));
+			net_os_send_hang_message(net);
+		}
+		dhd_os_wake_unlock(&dhd->pub);
+		return -ENODEV;
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+		netif_stop_queue(net);
+		dhd_os_wake_unlock(&dhd->pub);
+		return -ENODEV;
+	}
+
+	/* Make sure there's enough room for any header */
+	if (skb_headroom(skb) < dhd->pub.hdrlen) {
+		struct sk_buff *skb2;
+
+		DHD_INFO(("%s: insufficient headroom\n",
+		          dhd_ifname(&dhd->pub, ifidx)));
+		dhd->pub.tx_realloc++;
+		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen);
+		dev_kfree_skb(skb);
+		if ((skb = skb2) == NULL) {
+			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+			           dhd_ifname(&dhd->pub, ifidx)));
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+
+	/* Convert to packet */
+	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+		           dhd_ifname(&dhd->pub, ifidx)));
+		dev_kfree_skb_any(skb);
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+done:
+	if (ret)
+		dhd->pub.dstats.tx_dropped++;
+	else
+		dhd->pub.tx_packets++;
+
+	dhd_os_wake_unlock(&dhd->pub);
+
+	/* Return ok: we always eat the packet */
+	return 0;
+}
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+	struct net_device *net;
+	dhd_info_t *dhd = dhdp->info;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhdp->txoff = state;
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	net = dhd->iflist[ifidx]->net;
+	if (state == ON)
+		netif_stop_queue(net);
+	else
+		netif_wake_queue(net);
+}
+
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void * data, *pnext, *save_pktbuf;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	save_pktbuf = pktbuf;
+
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		/* Get the protocol, maintain skb around eth_type_trans().
+		 * The main reason for this hack is a limitation of Linux 2.4,
+		 * where 'eth_type_trans' does its internal skb_pull based on
+		 * 'net->hard_header_len' rather than ETH_HLEN. To avoid copying
+		 * the packet coming from the network stack when adding the BDC,
+		 * hardware header, etc., 'net->hard_header_len' is set during
+		 * network interface registration to ETH_HLEN plus the extra
+		 * space required for those headers, not just ETH_HLEN.
+		 */
+		eth = skb->data;
+		len = skb->len;
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+		}
+
+		skb->data = eth;
+		skb->len = len;
+
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM)
+			dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+			skb->mac_header,
+#else
+			skb->mac.raw,
+#endif
+			&event,
+			&data);
+
+		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+		if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
+			ifp = dhd->iflist[ifidx];
+
+		if (ifp->net)
+			ifp->net->last_rx = jiffies;
+
+		dhdp->dstats.rx_bytes += skb->len;
+		dhdp->rx_packets++; /* Local count */
+
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			/* If the receive is not processed inside an ISR,
+			 * the softirqd must be woken explicitly to service
+			 * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
+			 * by netif_rx_ni(), but in earlier kernels, we need
+			 * to do it manually.
+			 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+			netif_rx_ni(skb);
+#else
+			ulong flags;
+			netif_rx(skb);
+			local_irq_save(flags);
+			RAISE_RX_SOFTIRQ();
+			local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+		}
+	}
+	dhd_os_wake_lock_timeout_enable(dhdp);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+	return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	uint ifidx;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+
+	dhd_prot_hdrpull(dhdp, &ifidx, txp);
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type  = ntoh16(eh->ether_type);
+
+	if (type == ETHER_TYPE_802_1X)
+		atomic_dec(&dhd->pend_8021x_cnt);
+
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_if_t *ifp;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF)
+		return NULL;
+
+	ifp = dhd->iflist[ifidx];
+	ASSERT(dhd && ifp);
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+
+	/* Copy dongle stats to net device stats */
+	ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
+	ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
+	ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
+	ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
+	ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
+	ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
+	ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
+	ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
+	ifp->stats.multicast = dhd->pub.dstats.multicast;
+
+	return &ifp->stats;
+}
+
+static int
+dhd_watchdog_thread(void *data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+#ifdef DHD_SCHED
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+#endif /* DHD_SCHED */
+
+	DAEMONIZE("dhd_watchdog");
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible (&dhd->watchdog_sem) == 0) {
+			dhd_os_sdlock(&dhd->pub);
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+				/* Call the bus module watchdog */
+				dhd_bus_watchdog(&dhd->pub);
+
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid)
+					mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+			}
+			dhd_os_sdunlock(&dhd->pub);
+			dhd_os_wake_unlock(&dhd->pub);
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&dhd->watchdog_exited, 0);
+}
+
+static void
+dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+
+	dhd_os_wake_lock(&dhd->pub);
+	if (dhd->pub.dongle_reset) {
+		dhd_os_wake_unlock(&dhd->pub);
+		return;
+	}
+
+	if (dhd->watchdog_pid >= 0) {
+		up(&dhd->watchdog_sem);
+		return;
+	}
+
+	dhd_os_sdlock(&dhd->pub);
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+	dhd_os_sdunlock(&dhd->pub);
+	dhd_os_wake_unlock(&dhd->pub);
+}
+
+static int
+dhd_dpc_thread(void *data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+#ifdef DHD_SCHED
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+#endif /* DHD_SCHED */
+
+	DAEMONIZE("dhd_dpc");
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&dhd->dpc_sem) == 0) {
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				if (dhd_bus_dpc(dhd->pub.bus)) {
+					up(&dhd->dpc_sem);
+				}
+				else {
+					dhd_os_wake_unlock(&dhd->pub);
+				}
+			} else {
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+				dhd_os_wake_unlock(&dhd->pub);
+			}
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&dhd->dpc_exited, 0);
+}
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+		if (dhd_bus_dpc(dhd->pub.bus))
+			tasklet_schedule(&dhd->tasklet);
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+	}
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	dhd_os_wake_lock(dhdp);
+	if (dhd->dpc_pid >= 0) {
+		up(&dhd->dpc_sem);
+		return;
+	}
+
+	tasklet_schedule(&dhd->tasklet);
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strcpy(buf, "toe_ol");
+	if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		/* Check for older dongle image that doesn't support toe_ol */
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int toe, ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = TRUE;
+
+	/* Set toe_ol as requested */
+
+	strcpy(buf, "toe_ol");
+	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+	if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+
+	toe = (toe_ol != 0);
+
+	strcpy(buf, "toe");
+	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+	if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void dhd_ethtool_get_drvinfo(struct net_device *net,
+                                    struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+	sprintf(info->driver, "wl");
+	sprintf(info->version, "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any request driver name */
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		strncpy(drvname, info.driver, sizeof(info.driver));
+		drvname[sizeof(info.driver)-1] = '\0';
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		if (strcmp(drvname, "?dhd") == 0) {
+			sprintf(info.driver, "dhd");
+			strcpy(info.version, EPI_VERSION_STR);
+		}
+
+		/* otherwise, require dongle to be up */
+		else if (!dhd->pub.up) {
+			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+			return -ENODEV;
+		}
+
+		/* finally, report dongle driver type */
+		else if (dhd->pub.iswl)
+			sprintf(info.driver, "wl");
+		else
+			sprintf(info.driver, "xx");
+
+		sprintf(info.version, "%lu", dhd->pub.drv_version);
+		if (copy_to_user(uaddr, &info, sizeof(info)))
+			return -EFAULT;
+		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+		         (int)sizeof(drvname), drvname, info.driver));
+		break;
+
+#ifdef TOE
+	/* Get toe offload components from dongle */
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GTXCSUM:
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		edata.cmd = cmd;
+		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+		if (copy_to_user(uaddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+
+	/* Set toe offload components in dongle */
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_STXCSUM:
+		if (copy_from_user(&edata, uaddr, sizeof(edata)))
+			return -EFAULT;
+
+		/* Read the current settings, update and write back */
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		if (edata.data != 0)
+			toe_cmpnt |= csum_dir;
+		else
+			toe_cmpnt &= ~csum_dir;
+
+		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+			return ret;
+
+		/* If setting TX checksum mode, tell Linux the new mode */
+		if (cmd == ETHTOOL_STXCSUM) {
+			if (edata.data)
+				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+			else
+				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+
+		break;
+#endif /* TOE */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_ioctl_t ioc;
+	int bcmerror = 0;
+	int buflen = 0;
+	void *buf = NULL;
+	uint driver = 0;
+	int ifidx;
+	bool is_set_key_cmd;
+	int ret;
+
+	dhd_os_wake_lock(&dhd->pub);
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+	if (ifidx == DHD_BAD_IF) {
+		dhd_os_wake_unlock(&dhd->pub);
+		return -1;
+	}
+
+#if defined(CONFIG_WIRELESS_EXT)
+	/* linux wireless extensions */
+	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+		/* may recurse, do NOT lock */
+		ret = wl_iw_ioctl(net, ifr, cmd);
+		dhd_os_wake_unlock(&dhd->pub);
+		return ret;
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+	if (cmd == SIOCETHTOOL) {
+		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+		dhd_os_wake_unlock(&dhd->pub);
+		return ret;
+	}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+	if (cmd != SIOCDEVPRIVATE) {
+		dhd_os_wake_unlock(&dhd->pub);
+		return -EOPNOTSUPP;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	/* Copy the ioc control structure part of ioctl request */
+	if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+		bcmerror = -BCME_BADADDR;
+		goto done;
+	}
+
+	/* Copy out any buffer passed */
+	if (ioc.buf) {
+		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+		/* optimization for direct ioctl calls from kernel */
+		/*
+		if (segment_eq(get_fs(), KERNEL_DS)) {
+			buf = ioc.buf;
+		} else {
+		*/
+		{
+			if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) {
+				bcmerror = -BCME_NOMEM;
+				goto done;
+			}
+			if (copy_from_user(buf, ioc.buf, buflen)) {
+				bcmerror = -BCME_BADADDR;
+				goto done;
+			}
+		}
+	}
+
+	/* To differentiate between wl and dhd, read 4 more bytes */
+	if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+		sizeof(uint)) != 0)) {
+		bcmerror = -BCME_BADADDR;
+		goto done;
+	}
+
+	if (!capable(CAP_NET_ADMIN)) {
+		bcmerror = -BCME_EPERM;
+		goto done;
+	}
+
+	/* check for local dhd ioctl and handle it */
+	if (driver == DHD_IOCTL_MAGIC) {
+		bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
+		if (bcmerror)
+			dhd->pub.bcmerror = bcmerror;
+		goto done;
+	}
+
+	/* send to dongle (must be up, and wl) */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("%s DONGLE_DOWN\n", __FUNCTION__));
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	if (!dhd->pub.iswl) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	/* Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+	 * prevent M4 encryption.
+	 */
+	is_set_key_cmd = ((ioc.cmd == WLC_SET_KEY) ||
+	                 ((ioc.cmd == WLC_SET_VAR) &&
+	                        !(strncmp("wsec_key", ioc.buf, 9))) ||
+	                 ((ioc.cmd == WLC_SET_VAR) &&
+	                        !(strncmp("bsscfg:wsec_key", ioc.buf, 15))));
+	if (is_set_key_cmd) {
+		dhd_wait_pend8021x(net);
+	}
+
+	bcmerror = dhd_prot_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+
+done:
+	if ((bcmerror == -ETIMEDOUT) || ((dhd->pub.busstate == DHD_BUS_DOWN) &&
+			(!dhd->pub.dongle_reset))) {
+		DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__));
+		net_os_send_hang_message(net);
+	}
+
+	if (!bcmerror && buf && ioc.buf) {
+		if (copy_to_user(ioc.buf, buf, buflen))
+			bcmerror = -EFAULT;
+	}
+
+	if (buf)
+		MFREE(dhd->pub.osh, buf, buflen);
+
+	dhd_os_wake_unlock(&dhd->pub);
+
+	return OSL_ERROR(bcmerror);
+}
+
+static int
+dhd_stop(struct net_device *net)
+{
+#if !defined(IGNORE_ETH0_DOWN)
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+	DHD_TRACE(("%s: Enter %s\n", __FUNCTION__, net->name));
+	if (dhd->pub.up == 0) {
+		return 0;
+	}
+
+	/* Set state and stop OS transmissions */
+	dhd->pub.up = 0;
+	netif_stop_queue(net);
+#else
+	DHD_ERROR(("BYPASS %s:due to BRCM compilation : under investigation ...\n", __FUNCTION__));
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+
+	OLD_MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+#ifdef TOE
+	uint32 toe_ol;
+#endif
+	int ifidx;
+
+	/*  Force start if ifconfig_up gets called before START command */
+	wl_control_wl_start(net);
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if ((dhd->iflist[ifidx]) && (dhd->iflist[ifidx]->state == WLC_E_IF_DEL)) {
+		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+		return -1;
+	}
+
+	if (ifidx == 0) { /* do it only for primary eth0 */
+
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+		/* Get current TOE mode from dongle */
+		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+		else
+			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif
+	}
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	dhd->pub.up = 1;
+
+	OLD_MOD_INC_USE_COUNT;
+	return 0;
+}
+
+osl_t *
+dhd_osl_attach(void *pdev, uint bustype)
+{
+	return osl_attach(pdev, bustype, TRUE);
+}
+
+void
+dhd_osl_detach(osl_t *osh)
+{
+	if (MALLOCED(osh)) {
+		DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+	}
+	osl_detach(osh);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && 1
+	up(&dhd_registration_sem);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+}
+
+int
+dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
+	uint8 *mac_addr, uint32 flags, uint8 bssidx)
+{
+	dhd_if_t *ifp;
+
+	DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
+
+	ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+
+	ifp = dhd->iflist[ifidx];
+	if (!ifp && !(ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t)))) {
+		DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	memset(ifp, 0, sizeof(dhd_if_t));
+	ifp->info = dhd;
+	dhd->iflist[ifidx] = ifp;
+	strncpy(ifp->name, name, IFNAMSIZ);
+	ifp->name[IFNAMSIZ - 1] = '\0';
+	if (mac_addr != NULL)
+		memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+	if (handle == NULL) {
+		ifp->state = WLC_E_IF_ADD;
+		ifp->idx = ifidx;
+		ASSERT(dhd->sysioc_pid >= 0);
+		up(&dhd->sysioc_sem);
+	} else
+		ifp->net = (struct net_device *)handle;
+
+	return 0;
+}
+
+void
+dhd_del_if(dhd_info_t *dhd, int ifidx)
+{
+	dhd_if_t *ifp;
+
+	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
+	ifp = dhd->iflist[ifidx];
+	if (!ifp) {
+		DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
+		return;
+	}
+
+	ifp->state = WLC_E_IF_DEL;
+	ifp->idx = ifidx;
+	ASSERT(dhd->sysioc_pid >= 0);
+	up(&dhd->sysioc_sem);
+}
+
+
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+	dhd_info_t *dhd = NULL;
+	struct net_device *net;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	/* Update firmware and nvram paths if they were provided as module parameters */
+	if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
+		strcpy(fw_path, firmware_path);
+	if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
+		strcpy(nv_path, nvram_path);
+
+	/* Allocate etherdev, including space for private structure */
+	if (!(net = alloc_etherdev(sizeof(dhd)))) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Allocate primary dhd_info */
+	if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
+		DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+		goto fail;
+	}
+
+	memset(dhd, 0, sizeof(dhd_info_t));
+
+	/*
+	 * Save the dhd_info into the priv
+	 */
+	memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+	dhd->pub.osh = osh;
+
+	/* Set network interface name if it was provided as module parameter */
+	if (iface_name[0]) {
+		int len;
+		char ch;
+		strncpy(net->name, iface_name, IFNAMSIZ);
+		net->name[IFNAMSIZ - 1] = 0;
+		len = strlen(net->name);
+		ch = net->name[len - 1];
+		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+			strcat(net->name, "%d");
+	}
+
+	if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
+		goto fail;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+
+	mutex_init(&dhd->proto_sem);
+	/* Initialize other structure content */
+	init_waitqueue_head(&dhd->ioctl_resp_wait);
+	init_waitqueue_head(&dhd->ctrl_wait);
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&dhd->sdlock);
+	spin_lock_init(&dhd->txqlock);
+	spin_lock_init(&dhd->dhd_lock);
+
+	/* Initialize Wakelock stuff */
+	spin_lock_init(&dhd->wl_lock);
+	dhd->wl_count = 0;
+	dhd->wl_packet = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhd->wl_start_lock);
+#endif
+	/* Link to info module */
+	dhd->pub.info = dhd;
+
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+
+	/* Attach and link in the protocol */
+	if (dhd_prot_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_prot_attach failed\n"));
+		goto fail;
+	}
+#if defined(CONFIG_WIRELESS_EXT)
+	/* Attach and link in the iw */
+	if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+		DHD_ERROR(("wl_iw_attach failed\n"));
+		goto fail;
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	/* Set up the watchdog timer */
+	init_timer(&dhd->timer);
+	dhd->timer.data = (ulong)dhd;
+	dhd->timer.function = dhd_watchdog;
+
+	/* Initialize thread based operation and lock */
+	mutex_init(&dhd->sdsem);
+	if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
+		dhd->threads_only = TRUE;
+	}
+	else {
+		dhd->threads_only = FALSE;
+	}
+
+	if (dhd_watchdog_prio >= 0) {
+		/* Initialize watchdog thread */
+		sema_init(&dhd->watchdog_sem, 0);
+		init_completion(&dhd->watchdog_exited);
+		dhd->watchdog_pid = kernel_thread(dhd_watchdog_thread, dhd, 0);
+	} else {
+		dhd->watchdog_pid = -1;
+	}
+
+	/* Set up the bottom half handler */
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize DPC thread */
+		sema_init(&dhd->dpc_sem, 0);
+		init_completion(&dhd->dpc_exited);
+		dhd->dpc_pid = kernel_thread(dhd_dpc_thread, dhd, 0);
+	} else {
+		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+		dhd->dpc_pid = -1;
+	}
+
+	if (dhd_sysioc) {
+		sema_init(&dhd->sysioc_sem, 0);
+		init_completion(&dhd->sysioc_exited);
+		dhd->sysioc_pid = kernel_thread(_dhd_sysioc_thread, dhd, 0);
+	} else {
+		dhd->sysioc_pid = -1;
+	}
+
+	/*
+	 * Save the dhd_info into the priv
+	 */
+	memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+	register_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /*  (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+#endif
+
+	register_inetaddr_notifier(&dhd_notifier);
+
+	return &dhd->pub;
+
+fail:
+	if (net)
+		free_netdev(net);
+	if (dhd)
+		dhd_detach(&dhd->pub);
+
+	return NULL;
+}
+
+
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+#ifdef EMBEDDED_PLATFORM
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+#endif /* EMBEDDED_PLATFORM */
+
+	ASSERT(dhd);
+
+	DHD_TRACE(("%s: \n", __FUNCTION__));
+
+	dhd_os_sdlock(dhdp);
+
+	/* try to download image and nvram to the dongle */
+	if (dhd->pub.busstate == DHD_BUS_DOWN) {
+		if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+		                                fw_path, nv_path))) {
+			DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s\n",
+			           __FUNCTION__, fw_path, nv_path));
+			dhd_os_sdunlock(dhdp);
+			return -1;
+		}
+	}
+
+	/* Start the watchdog timer */
+	dhd->pub.tickcnt = 0;
+	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+	/* Bring up the bus */
+	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+		dhd_os_sdunlock(dhdp);
+		return ret;
+	}
+#if defined(OOB_INTR_ONLY)
+	/* Host registration for OOB interrupt */
+	if (bcmsdh_register_oob_intr(dhdp)) {
+		dhd->wd_timer_valid = FALSE;
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s Host failed to resgister for OOB\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		return -ENODEV;
+	}
+
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* If bus is not ready, can't come up */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		dhd->wd_timer_valid = FALSE;
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		return -ENODEV;
+	}
+
+	dhd_os_sdunlock(dhdp);
+
+#ifdef EMBEDDED_PLATFORM
+	bcm_mkiovar("event_msgs", dhdp->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	dhdcdc_query_ioctl(dhdp, 0, WLC_GET_VAR, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, dhdp->eventmask, WL_EVENTING_MASK_LEN);
+
+	setbit(dhdp->eventmask, WLC_E_SET_SSID);
+	setbit(dhdp->eventmask, WLC_E_PRUNE);
+	setbit(dhdp->eventmask, WLC_E_AUTH);
+	setbit(dhdp->eventmask, WLC_E_REASSOC);
+	setbit(dhdp->eventmask, WLC_E_REASSOC_IND);
+	setbit(dhdp->eventmask, WLC_E_DEAUTH_IND);
+	setbit(dhdp->eventmask, WLC_E_DISASSOC_IND);
+	setbit(dhdp->eventmask, WLC_E_DISASSOC);
+	setbit(dhdp->eventmask, WLC_E_JOIN);
+	setbit(dhdp->eventmask, WLC_E_ASSOC_IND);
+	setbit(dhdp->eventmask, WLC_E_PSK_SUP);
+	setbit(dhdp->eventmask, WLC_E_LINK);
+	setbit(dhdp->eventmask, WLC_E_NDIS_LINK);
+	setbit(dhdp->eventmask, WLC_E_MIC_ERROR);
+	setbit(dhdp->eventmask, WLC_E_PMKID_CACHE);
+	setbit(dhdp->eventmask, WLC_E_TXFAIL);
+	setbit(dhdp->eventmask, WLC_E_JOIN_START);
+	setbit(dhdp->eventmask, WLC_E_SCAN_COMPLETE);
+	setbit(dhdp->eventmask, WLC_E_RELOAD);
+#ifdef PNO_SUPPORT
+	setbit(dhdp->eventmask, WLC_E_PFN_NET_FOUND);
+#endif /* PNO_SUPPORT */
+
+/* enable dongle roaming event */
+	setbit(dhdp->eventmask, WLC_E_ROAM);
+
+	dhdp->pktfilter_count = 1;
+	/* Setup filter to allow only unicast */
+	dhdp->pktfilter[0] = "100 0 0 0 0x01 0x00";
+#endif /* EMBEDDED_PLATFORM */
+
+	/* Bus is ready, do any protocol initialization */
+	if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+		return ret;
+
+	return 0;
+}
+
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+	char buf[strlen(name) + 1 + cmd_len];
+	int len = sizeof(buf);
+	wl_ioctl_t ioc;
+	int ret;
+
+	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_prot_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (!set && ret >= 0)
+		memcpy(cmd_buf, buf, cmd_len);
+
+	return ret;
+}
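+
+/*
+ * Illustrative sketch, not part of the original driver: dhd_iovar() packs the
+ * name/value pair with bcm_mkiovar() and issues it through dhd_prot_ioctl(),
+ * so a caller reading a 32-bit iovar could look roughly like this (the iovar
+ * name "mpc" is only an example):
+ *
+ *	uint32 val = 0;
+ *
+ *	if (dhd_iovar(&dhd->pub, 0, "mpc", (char *)&val, sizeof(val), FALSE) >= 0)
+ *		DHD_INFO(("mpc = %u\n", val));
+ */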
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+	.ndo_open = dhd_open,
+	.ndo_stop = dhd_stop,
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+};
+
+static struct net_device_ops dhd_ops_virt = {
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+};
+#endif
+
+static int dhd_device_event(struct notifier_block *this, unsigned long event,
+				void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+
+	if (!ifa)
+		return NOTIFY_DONE;
+
+	dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+	dhd_pub = &dhd->pub;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+	if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) {
+#else
+	if (ifa->ifa_dev->dev->open == &dhd_open) {
+#endif
+		switch (event) {
+		case NETDEV_UP:
+			DHD_TRACE(("%s: [%s] Up IP: 0x%x\n",
+			    __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+			dhd_arp_cleanup(dhd_pub);
+			break;
+
+		case NETDEV_DOWN:
+			DHD_TRACE(("%s: [%s] Down IP: 0x%x\n",
+			    __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+			dhd_arp_cleanup(dhd_pub);
+			break;
+
+		default:
+			DHD_TRACE(("%s: [%s] Event: %lu\n",
+			    __FUNCTION__, ifa->ifa_label, event));
+			break;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+int
+dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct net_device *net;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	net = dhd->iflist[ifidx]->net;
+
+	ASSERT(net);
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->get_stats = dhd_get_stats;
+	net->do_ioctl = dhd_ioctl_entry;
+	net->hard_start_xmit = dhd_start_xmit;
+	net->set_mac_address = dhd_set_mac_address;
+	net->set_multicast_list = dhd_set_multicast_list;
+	net->open = net->stop = NULL;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &dhd_ops_virt;
+#endif
+
+	/* Override the virtual handlers for the primary interface only */
+	if (ifidx == 0) {
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+		net->open = dhd_open;
+		net->stop = dhd_stop;
+#else
+		net->netdev_ops = &dhd_ops_pri;
+#endif
+	}
+
+	/*
+	 * We have to use the primary MAC for virtual interfaces
+	 */
+	if (ifidx != 0) {
+		/* for virtual interfaces use the primary MAC  */
+		memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+	}
+
+	if (ifidx == 1) {
+		DHD_TRACE(("%s ACCESS POINT MAC: \n", __FUNCTION__));
+		/* Access point interface: use a locally administered address */
+		temp_addr[0] |= 0x02;	/* set the locally administered bit */
+	}
+	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+	net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	dhd->pub.rxsz = net->mtu + net->hard_header_len + dhd->pub.hdrlen;
+
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	if (register_netdev(net) != 0) {
+		DHD_ERROR(("%s: couldn't register the net device\n", __FUNCTION__));
+		goto fail;
+	}
+
+	printf("%s: Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", net->name,
+	       dhd->pub.mac.octet[0], dhd->pub.mac.octet[1], dhd->pub.mac.octet[2],
+	       dhd->pub.mac.octet[3], dhd->pub.mac.octet[4], dhd->pub.mac.octet[5]);
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if defined(CONFIG_FIRST_SCAN)
+#ifdef SOFTAP
+	if (ifidx == 0)
+		/* Don't call for SOFTAP Interface in SOFTAP MODE */
+		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#else
+		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif /* SOFTAP */
+#endif /* CONFIG_FIRST_SCAN */
+#endif /* CONFIG_WIRELESS_EXT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	up(&dhd_registration_sem);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+	return 0;
+
+fail:
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+	return BCME_ERROR;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+			/* Stop the protocol module */
+			dhd_prot_stop(&dhd->pub);
+
+			/* Stop the bus module */
+			dhd_bus_stop(dhd->pub.bus, TRUE);
+#if defined(OOB_INTR_ONLY)
+			bcmsdh_unregister_oob_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+
+			/* Clear the watchdog timer */
+			dhd->wd_timer_valid = FALSE;
+			del_timer_sync(&dhd->timer);
+		}
+	}
+}
+
+void
+dhd_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+			dhd_if_t *ifp;
+			int i;
+
+			unregister_inetaddr_notifier(&dhd_notifier);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+			if (dhd->early_suspend.suspend)
+				unregister_early_suspend(&dhd->early_suspend);
+#endif	/* defined(CONFIG_HAS_EARLYSUSPEND) */
+#if defined(CONFIG_WIRELESS_EXT)
+			/* Detach the wireless extensions (iw) module */
+			wl_iw_detach();
+#endif
+			if (dhd->sysioc_pid >= 0) {
+				KILL_PROC(dhd->sysioc_pid, SIGTERM);
+				wait_for_completion(&dhd->sysioc_exited);
+			}
+
+			for (i = 1; i < DHD_MAX_IFS; i++)
+				if (dhd->iflist[i]) {
+					dhd->iflist[i]->state = WLC_E_IF_DEL;
+					dhd->iflist[i]->idx = i;
+					dhd_op_if(dhd->iflist[i]);
+				}
+
+			ifp = dhd->iflist[0];
+			ASSERT(ifp);
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+			if (ifp->net->open) {
+#else
+			if (ifp->net->netdev_ops == &dhd_ops_pri) {
+#endif
+				dhd_stop(ifp->net);
+				unregister_netdev(ifp->net);
+			}
+
+			if (dhd->watchdog_pid >= 0)
+			{
+				KILL_PROC(dhd->watchdog_pid, SIGTERM);
+				wait_for_completion(&dhd->watchdog_exited);
+			}
+
+			if (dhd->dpc_pid >= 0)
+			{
+				KILL_PROC(dhd->dpc_pid, SIGTERM);
+				wait_for_completion(&dhd->dpc_exited);
+			}
+			else
+				tasklet_kill(&dhd->tasklet);
+
+			dhd_bus_detach(dhdp);
+
+			if (dhdp->prot)
+				dhd_prot_detach(dhdp);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+			unregister_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+			free_netdev(ifp->net);
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_lock_destroy(&dhd->wl_wifi);
+			wake_lock_destroy(&dhd->wl_rxwake);
+#endif
+			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+		}
+	}
+}
+
+static void __exit
+dhd_module_cleanup(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_bus_unregister();
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+	wifi_del_dev();
+#endif
+	/* Call customer gpio to turn off power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+}
+
+static int __init
+dhd_module_init(void)
+{
+	int error;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Sanity check on the module parameters */
+	do {
+		/* Both watchdog and DPC as tasklets are ok */
+		if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
+			break;
+
+		/* If both watchdog and DPC are threads, TX must be deferred */
+		if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
+			break;
+
+		DHD_ERROR(("Invalid module parameters.\n"));
+		return -EINVAL;
+	} while (0);
+
+	/* Call customer gpio to turn on power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+	sema_init(&wifi_control_sem, 0);
+
+	error = wifi_add_dev();
+	if (error) {
+		DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
+		goto fail_0;
+	}
+
+	/* Wait for the callback after platform_driver_register completes, or exit with an error */
+	if (down_timeout(&wifi_control_sem,  msecs_to_jiffies(5000)) != 0) {
+		error = -EINVAL;
+		DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
+		goto fail_1;
+	}
+#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	sema_init(&dhd_registration_sem, 0);
+#endif 
+
+	error = dhd_bus_register();
+
+	if (!error)
+		printf("\n%s\n", dhd_version);
+	else {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail_1;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/*
+	 * Wait until the MMC sdio_register_driver callback has been called and
+	 * the driver attach has completed. This keeps the exit from dhd insmod
+	 * in sync with the kernel MMC sdio device callback registration.
+	 */
+	if (down_timeout(&dhd_registration_sem,  msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
+		error = -EINVAL;
+		DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
+		goto fail_2;
+	}
+#endif
+	return error;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+fail_2:
+	dhd_bus_unregister();
+#endif
+fail_1:
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+	wifi_del_dev();
+fail_0:
+#endif /* defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
+
+	/* Call customer gpio to turn off power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+
+	return error;
+}
+
+module_init(dhd_module_init);
+module_exit(dhd_module_cleanup);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		mutex_lock(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		mutex_unlock(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+	return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+	dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	DECLARE_WAITQUEUE(wait, current);
+	int timeout = dhd_ioctl_timeout_msec;
+
+	/* Convert timeout from milliseconds to jiffies */
+	/* timeout = timeout * HZ / 1000; */
+	timeout = msecs_to_jiffies(timeout);
+
+	/* Wait until control frame is available */
+	add_wait_queue(&dhd->ioctl_resp_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	smp_mb();
+	while (!(*condition) && (!signal_pending(current) && timeout)) {
+		timeout = schedule_timeout(timeout);
+		smp_mb();
+	}
+
+	if (signal_pending(current))
+		*pending = TRUE;
+
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
+
+	return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (waitqueue_active(&dhd->ioctl_resp_wait)) {
+		wake_up_interruptible(&dhd->ioctl_resp_wait);
+	}
+
+	return 0;
+}
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+	int del_timer_flag = FALSE;
+
+	flags = dhd_os_spin_lock(pub);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate != DHD_BUS_DOWN) {
+		if (wdtick) {
+			dhd_watchdog_ms = (uint)wdtick;
+			dhd->wd_timer_valid = TRUE;
+			/* Re-arm the timer with the last watchdog period */
+			mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+		} else if (dhd->wd_timer_valid == TRUE) {
+			/* Totally stop the timer */
+			dhd->wd_timer_valid = FALSE;
+			del_timer_flag = TRUE;
+		}
+	}
+	dhd_os_spin_unlock(pub, flags);
+	if (del_timer_flag) {
+		del_timer_sync(&dhd->timer);
+	}
+}
+
+void *
+dhd_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd->threads_only)
+		mutex_lock(&dhd->sdsem);
+	else
+		spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd->threads_only)
+		mutex_unlock(&dhd->sdsem);
+	else
+		spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->txqlock);
+}
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdtxlock(dhd_pub_t *pub)
+{
+	dhd_os_sdlock(pub);
+}
+
+void
+dhd_os_sdtxunlock(dhd_pub_t *pub)
+{
+	dhd_os_sdunlock(pub);
+}
+
+#ifdef DHD_USE_STATIC_BUF
+void * dhd_os_prealloc(int section, unsigned long size)
+{
+#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
+	void *alloc_ptr = NULL;
+	if (wifi_control_data && wifi_control_data->mem_prealloc)
+	{
+		alloc_ptr = wifi_control_data->mem_prealloc(section, size);
+		if (alloc_ptr)
+		{
+			DHD_INFO(("success alloc section %d\n", section));
+			bzero(alloc_ptr, size);
+			return alloc_ptr;
+		}
+	}
+
+	DHD_ERROR(("can't alloc section %d\n", section));
+	return 0;
+#else
+	return MALLOC(0, size);
+#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
+}
+#endif /* DHD_USE_STATIC_BUF */
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+	int res = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+	if (res == 0)
+		return &dhd->iw.wstats;
+	else
+		return NULL;
+}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+	wl_event_msg_t *event, void **data)
+{
+	int bcmerror = 0;
+
+	ASSERT(dhd != NULL);
+
+	bcmerror = wl_host_event(dhd, ifidx, pktdata, event, data);
+	if (bcmerror != BCME_OK)
+		return (bcmerror);
+
+#if defined(CONFIG_WIRELESS_EXT)
+	ASSERT(dhd->iflist[*ifidx] != NULL);
+
+	if (ntoh32(event->event_type) == WLC_E_IF) {
+		DHD_INFO(("<0> interface:%d OP:%d don't pass to wext,"
+			"net_device might not be created yet\n",
+				*ifidx, ntoh32(event->event_type)));
+		return bcmerror;
+	}
+
+	ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+	if (dhd->iflist[*ifidx]->net)
+		wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+	default:
+		break;
+	}
+}
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	dhd_os_sdunlock(dhd);
+	wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2);
+	dhd_os_sdlock(dhd);
+#endif
+	return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	if (waitqueue_active(&dhdinfo->ctrl_wait))
+		wake_up_interruptible(&dhdinfo->ctrl_wait);
+#endif
+	return;
+}
+
+int
+dhd_dev_reset(struct net_device *dev, uint8 flag)
+{
+	int ret;
+
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	ret = dhd_bus_devreset(&dhd->pub, flag);
+	if (ret) {
+		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	DHD_ERROR(("%s: WLAN %s DONE\n", __FUNCTION__, flag ? "OFF" : "ON"));
+
+	return ret;
+}
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd) {
+		ret = dhd->pub.suspend_disable_flag;
+		dhd->pub.suspend_disable_flag = val;
+	}
+	return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val)
+{
+	int ret = 0;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd) {
+		dhd_os_proto_block(&dhd->pub);
+		ret = dhd_set_suspend(val, &dhd->pub);
+		dhd_os_proto_unblock(&dhd->pub);
+	}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+	return ret;
+}
+
+int net_os_set_dtim_skip(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd)
+		dhd->pub.dtim_skip = val;
+
+	return 0;
+}
+
+int net_os_set_packet_filter(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	/* Packet filtering is set only if we are still in early-suspend and
+	 * we need either to turn it ON or turn it OFF.
+	 * We can always turn it OFF in case of early-suspend, but we turn it
+	 * back ON only if suspend_disable_flag was not set.
+	 */
+	if (dhd && dhd->pub.up) {
+		dhd_os_proto_block(&dhd->pub);
+		if (dhd->pub.in_suspend) {
+			if (!val || (val && !dhd->pub.suspend_disable_flag))
+				dhd_set_packet_filter(val, &dhd->pub);
+		}
+		dhd_os_proto_unblock(&dhd->pub);
+	}
+	return ret;
+}
+
+
+void
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	dhd_preinit_ioctls(&dhd->pub);
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_clean */
+int
+dhd_dev_pno_reset(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_clean(&dhd->pub));
+}
+
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev,  int pfn_enabled)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_enable(&dhd->pub, pfn_enabled));
+}
+
+
+/* Linux wrapper to call common dhd_pno_set */
+int
+dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+		ushort scan_fr, int pno_repeat, int pno_freq_expo_max)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max));
+}
+
+/* Linux wrapper to get  pno status */
+int
+dhd_dev_get_pno_status(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_get_status(&dhd->pub));
+}
+
+#endif /* PNO_SUPPORT */
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd) {
+		if (!dhd->hang_was_sent) {
+			dhd->hang_was_sent = 1;
+			ret = wl_iw_send_priv_event(dev, "HANG");
+		}
+	}
+	return ret;
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd && dhd->pub.up)
+		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+}
+
+char *dhd_bus_country_get(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd && (dhd->pub.dhd_cspec.ccode[0] != 0))
+		return dhd->pub.dhd_cspec.ccode;
+	return NULL;
+}
+
+void dhd_os_start_lock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		mutex_lock(&dhd->wl_start_lock);
+#endif
+}
+
+void dhd_os_start_unlock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		mutex_unlock(&dhd->wl_start_lock);
+#endif
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+	return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX	10
+
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int timeout = 10 * HZ / 1000;
+	int ntimes = MAX_WAIT_FOR_8021X_TX;
+	int pend = dhd_get_pend_8021x_cnt(dhd);
+
+	while (ntimes && pend) {
+		if (pend) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(timeout);
+			set_current_state(TASK_RUNNING);
+			ntimes--;
+		}
+		pend = dhd_get_pend_8021x_cnt(dhd);
+	}
+	return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+	int ret = 0;
+	struct file *fp;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* open file to write */
+	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+	if (IS_ERR(fp)) {
+		printf("%s: open file error\n", __FUNCTION__);
+		ret = -1;
+		fp = NULL;
+		goto exit;
+	}
+
+	/* Write buf to file */
+	fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+	/* free buf before return */
+	MFREE(dhd->osh, buf, size);
+	/* close file before return */
+	if (fp)
+		filp_close(fp, current->files);
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wl_lock, flags);
+		ret = dhd->wl_packet;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (dhd->wl_packet)
+			wake_lock_timeout(&dhd->wl_rxwake, HZ);
+#endif
+		dhd->wl_packet = 0;
+		spin_unlock_irqrestore(&dhd->wl_lock, flags);
+	}
+	/* printk("%s: %d\n", __FUNCTION__, ret); */
+	return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wl_lock, flags);
+		dhd->wl_packet = 1;
+		spin_unlock_irqrestore(&dhd->wl_lock, flags);
+	}
+	/* printk("%s\n",__func__); */
+	return 0;
+}
+
+int net_os_wake_lock_timeout_enable(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout_enable(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wl_lock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+		if (!dhd->wl_count)
+			wake_lock(&dhd->wl_wifi);
+#endif
+		dhd->wl_count++;
+		ret = dhd->wl_count;
+		spin_unlock_irqrestore(&dhd->wl_lock, flags);
+	}
+	/* printk("%s: %d\n", __FUNCTION__, ret); */
+	return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	dhd_os_wake_lock_timeout(pub);
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wl_lock, flags);
+		if (dhd->wl_count) {
+			dhd->wl_count--;
+#ifdef CONFIG_HAS_WAKELOCK
+			if (!dhd->wl_count)
+				wake_unlock(&dhd->wl_wifi);
+#endif
+			ret = dhd->wl_count;
+		}
+		spin_unlock_irqrestore(&dhd->wl_lock, flags);
+	}
+	/* printk("%s: %d\n", __FUNCTION__, ret); */
+	return ret;
+}
+
+int net_os_wake_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_unlock(&dhd->pub);
+	return ret;
+}
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags = 0;
+
+	if (dhd)
+		spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+	return flags;
+}
+
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
diff --git a/drivers/net/wireless/bcm4329/dhd_linux_sched.c b/drivers/net/wireless/bcm4329/dhd_linux_sched.c
new file mode 100644
index 0000000..480b416
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_linux_sched.c
@@ -0,0 +1,38 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c,v 1.1.34.1.6.1 2009/01/16 01:17:40 Exp $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+	int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+	return rc;
+}
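+
+/*
+ * Example usage (a sketch mirroring the DHD_SCHED path in dhd_linux.c): the
+ * watchdog and DPC threads raise their own scheduling class like this, where
+ * dhd_dpc_prio is a module parameter:
+ *
+ *	struct sched_param param;
+ *
+ *	param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO) ?
+ *		dhd_dpc_prio : (MAX_RT_PRIO - 1);
+ *	setScheduler(current, SCHED_FIFO, &param);
+ *
+ * On pre-2.6 kernels the wrapper is a no-op and simply returns 0.
+ */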
diff --git a/drivers/net/wireless/bcm4329/dhd_proto.h b/drivers/net/wireless/bcm4329/dhd_proto.h
new file mode 100644
index 0000000..7ef6929
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_proto.h
@@ -0,0 +1,102 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h,v 1.2.82.1.4.1.16.7 2010/05/10 12:54:59 Exp $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT  3000 /* in milliseconds */
+#endif
+
+#ifndef IOCTL_CHIP_ACTIVE_TIMEOUT
+#define IOCTL_CHIP_ACTIVE_TIMEOUT  10 /* in milliseconds */
+#endif
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_prot_init(dhd_pub_t *dhdp);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+extern bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, uint8 *fcbits);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
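+
+/* A sketch of a typical call, mirroring dhd_toe_get() in dhd_linux.c:
+ *
+ *	wl_ioctl_t ioc;
+ *	char buf[32];
+ *	int ret;
+ *
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.cmd = WLC_GET_VAR;
+ *	ioc.buf = buf;
+ *	ioc.len = (uint)sizeof(buf);
+ *	ioc.set = FALSE;
+ *	strcpy(buf, "toe_ol");
+ *	ret = dhd_prot_ioctl(dhdp, ifidx, &ioc, ioc.buf, ioc.len);
+ */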
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                             void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+/********************************
+ * For version-string expansion *
+ ********************************/
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#elif defined(RNDIS)
+#define DHD_PROTOCOL "rndis"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcm4329/dhd_sdio.c b/drivers/net/wireless/bcm4329/dhd_sdio.c
new file mode 100644
index 0000000..1380dd3
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dhd_sdio.c
@@ -0,0 +1,5823 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c,v 1.157.2.27.2.33.2.129.4.1 2010/09/02 23:13:16 Exp $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <sbhnddma.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifdef DHD_DEBUG
+#include <hndrte_cons.h>
+#endif /* DHD_DEBUG */
+#ifdef DHD_DEBUG_TRAP
+#include <hndrte_armtrap.h>
+#endif /* DHD_DEBUG_TRAP */
+
+#define QLEN		256	/* bulk rx and tx queue lengths */
+#define FCHI		(QLEN - 10)
+#define FCLOW		(FCHI / 2)
+#define PRIOMASK	7
+
+#define TXRETRIES	2	/* # of retries for tx frames */
+
+#if defined(CONFIG_MACH_SANDGATE2G)
+#define DHD_RXBOUND	250	/* Default for max rx frames in one scheduling */
+#else
+#define DHD_RXBOUND	50	/* Default for max rx frames in one scheduling */
+#endif /* defined(CONFIG_MACH_SANDGATE2G) */
+
+#define DHD_TXBOUND	20	/* Default for max tx frames in one scheduling */
+
+#define DHD_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold biggest possible glom */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD	32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN	(SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#ifdef SDTEST
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ	32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ	2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY	3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * cap the wait at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY < 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Free the packet unconditionally for the SDIO and SDSPI buses; for the gSPI
+ * bus, free it only when the local buffer pool is in use.
+ */
+#define PKTFREE2()		if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+
+extern void bcmsdh_set_irq(int flag);
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+typedef struct dhd_console {
+	uint		count;			/* Poll interval msec counter */
+	uint		log_addr;		/* Log struct address (fixed) */
+	hndrte_log_t	log;			/* Log struct (host copy) */
+	uint		bufsize;		/* Size of log buffer */
+	uint8		*buf;			/* Log buffer (host copy) */
+	uint		last;			/* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+
+	bcmsdh_info_t	*sdh;			/* Handle for BCMSDH calls */
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+
+	sdpcmd_regs_t	*regs;			/* Registers for SDIO core */
+	uint		sdpcmrev;		/* SDIO core revision */
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		hostintmask;		/* Copy of Host Interrupt Mask */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path; /* module_param: path to firmware image */
+	char		*nv_path; /* module_param: path to nvram vars file */
+	const char      *nvram_params;		/* user specified nvram params. */
+
+	uint		blocksize;		/* Block size of SDIO transfers */
+	uint		roundup;		/* Max roundup limit */
+
+	struct pktq	txq;			/* Transmit packet queue (its length drives flow control) */
+	uint8		flowcontrol;		/* per prio flow control bitmask */
+	uint8		tx_seq;			/* Transmit sequence number (next) */
+	uint8		tx_max;			/* Maximum transmit sequence allowed */
+
+	uint8		hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+	uint8		*rxhdr;			/* Header of current rx frame (in hdrbuf) */
+	uint16		nextlen;		/* Next Read Len from last header */
+	uint8		rx_seq;			/* Receive sequence number (expected) */
+	bool		rxskip;			/* Skip receive (awaiting NAK ACK) */
+
+	void		*glomd;			/* Packet containing glomming descriptor */
+	void		*glom;			/* Packet chain for glommed superframe */
+	uint		glomerr;		/* Glom packet read errors */
+
+	uint8		*rxbuf;			/* Buffer for receiving control packets */
+	uint		rxblen;			/* Allocated length of rxbuf */
+	uint8		*rxctl;			/* Aligned pointer into rxbuf */
+	uint8		*databuf;		/* Buffer for receiving big glom packet */
+	uint8		*dataptr;		/* Aligned pointer into databuf */
+	uint		rxlen;			/* Length of valid data in buffer */
+
+	uint8		sdpcm_ver;		/* Bus protocol reported by dongle */
+
+	bool		intr;			/* Use interrupts */
+	bool		poll;			/* Use polling */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint 		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+	uint		spurious;		/* Count of spurious interrupts */
+	uint		pollrate;		/* Ticks between device polls */
+	uint		polltick;		/* Tick counter */
+	uint		pollcnt;		/* Count of active polls */
+
+#ifdef DHD_DEBUG
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+	uint		regfails;		/* Count of R_REG/W_REG failures */
+
+	uint		clkstate;		/* State of sd and backplane clock(s) */
+	bool		activity;		/* Activity flag for clock down */
+	int32		idletime;		/* Control for activity timeout */
+	int32		idlecount;		/* Activity timeout counter */
+	int32		idleclock;		/* How to set bus driver when idle */
+	int32		sd_divisor;		/* Speed control to bus driver */
+	int32		sd_mode;		/* Mode control to bus driver */
+	int32		sd_rxchain;		/* If bcmsdh api accepts PKT chains */
+	bool		use_rxchain;		/* If dhd should use PKT chains */
+	bool		sleeping;		/* Is SDIO bus sleeping? */
+	bool		rxflow_mode;	/* Rx flow control mode */
+	bool		rxflow;			/* Is rx flow control on */
+	uint		prev_rxlim_hit;		/* Is prev rx limit exceeded (per dpc schedule) */
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+	/* Decides whether received control frames land in rxbuf or the lbuf pool */
+	bool		usebufpool;
+
+#ifdef SDTEST
+	/* external loopback */
+	bool		ext_loop;
+	uint8		loopid;
+
+	/* pktgen configuration */
+	uint		pktgen_freq;		/* Ticks between bursts */
+	uint		pktgen_count;		/* Packets to send each burst */
+	uint		pktgen_print;		/* Bursts between count displays */
+	uint		pktgen_total;		/* Stop after this many */
+	uint		pktgen_minlen;		/* Minimum packet data len */
+	uint		pktgen_maxlen;		/* Maximum packet data len */
+	uint		pktgen_mode;		/* Configured mode: tx, rx, or echo */
+	uint		pktgen_stop;		/* Number of tx failures causing stop */
+
+	/* active pktgen fields */
+	uint		pktgen_tick;		/* Tick counter for bursts */
+	uint		pktgen_ptick;		/* Burst counter for printing */
+	uint		pktgen_sent;		/* Number of test packets generated */
+	uint		pktgen_rcvd;		/* Number of test packets received */
+	uint		pktgen_fail;		/* Number of failed send attempts */
+	uint16		pktgen_len;		/* Length of next packet to send */
+#endif /* SDTEST */
+
+	/* Some additional counters */
+	uint		tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint		fcqueued;		/* Tx packets that got queued */
+	uint		rxrtx;			/* Count of rtx requests (NAK to dongle) */
+	uint		rx_toolong;		/* Count of received frames too long to handle */
+	uint		rxc_errors;		/* SDIO errors when reading control frames */
+	uint		rx_hdrfail;		/* SDIO errors on header reads */
+	uint		rx_badhdr;		/* Bad received headers (re-sync?) */
+	uint		rx_badseq;		/* Mismatched rx sequence number */
+	uint		fc_rcvd;		/* Number of flow-control events received */
+	uint		fc_xoff;		/* Number which turned on flow-control */
+	uint		fc_xon;			/* Number which turned off flow-control */
+	uint		rxglomfail;		/* Failed deglom attempts */
+	uint		rxglomframes;		/* Number of glom frames (superframes) */
+	uint		rxglompkts;		/* Number of packets from glom frames */
+	uint		f2rxhdrs;		/* Number of header reads */
+	uint		f2rxdata;		/* Number of frame data reads */
+	uint		f2txdata;		/* Number of f2 frame writes */
+	uint		f1regdata;		/* Number of f1 register accesses */
+
+	uint8		*ctrl_frame_buf;
+	uint32		ctrl_frame_len;
+	bool		ctrl_frame_stat;
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE	0
+#define CLK_SDONLY	1
+#define CLK_PENDING	2	/* Awaiting clock-available interrupt */
+#define CLK_AVAIL	3
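+
+/*
+ * Informal sketch of the clock state machine driven by dhdsdio_clkctl() below
+ * (derived from the code in this file, not from separate documentation):
+ *
+ *	CLK_NONE <-> CLK_SDONLY <-> CLK_AVAIL
+ *
+ * CLK_PENDING is entered by dhdsdio_htclk() when, with pendok set, it arms the
+ * clock-available interrupt instead of polling for HT availability.
+ */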
+
+#define DHD_NOPMU(dhd)	(FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_MEMSIZE (128 * 1024)
+int dhd_dongle_memsize;
+
+static bool dhd_doflow;
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static const uint watermark = 8;
+static const uint firstread = DHD_FIRSTREAD;
+
+#define HDATLEN (firstread - (SDPCM_HDRLEN))
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+#define ALIGNMENT  4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align)					\
+	do {								\
+		uint datalign;						\
+		datalign = (uintptr)PKTDATA((osh), (p));		\
+		datalign = ROUNDUP(datalign, (align)) - datalign;	\
+		ASSERT(datalign < (align));				\
+		ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign));	\
+		if (datalign)						\
+			PKTPULL((osh), (p), datalign);			\
+		PKTSETLEN((osh), (p), (len));				\
+	} while (0)
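+/* Worked example (illustrative): with align = 32 and PKTDATA() returning an
+ * address ending in ...04, datalign becomes ROUNDUP(addr, 32) - addr = 28, so
+ * the macro pulls 28 bytes to land the data on a 32-byte boundary before
+ * trimming the packet to the requested length.
+ */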
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+
+/* Check whether the dongle has offered any transmit window (flow-control credit) */
+#define DATAOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
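+/* Worked example (illustrative): with tx_seq = 250 and tx_max = 2, the 8-bit
+ * difference (uint8)(2 - 250) is 8, which is non-zero with bit 7 clear, so
+ * DATAOK() reports that 8 frames of transmit credit remain; a difference of 0,
+ * or one with bit 7 set, means no usable window.
+ */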
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		regvar = R_REG(bus->dhd->osh, regaddr); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED " #regvar " READ, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+			regvar = 0; \
+		} \
+	} \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		W_REG(bus->dhd->osh, regaddr, regval); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) \
+			DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+	} \
+} while (0)
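+/* Typical usage, as seen later in this file (sketch):
+ *
+ *	uint retries = 0;
+ *	uint32 intstatus;
+ *
+ *	R_SDREG(intstatus, &bus->regs->intstatus, retries);
+ *	W_SDREG(SMB_DEV_INT, &bus->regs->tosbmailbox, retries);
+ */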
+
+
+#define DHD_BUS			SDIO_BUS
+
+#define PKT_AVAILABLE()		(intstatus & I_HMB_FRAME_IND)
+
+#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start);
+#endif
+
+#ifdef DHD_DEBUG_TRAP
+static int dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size);
+#endif /* DHD_DEBUG_TRAP */
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+                                 void * regsva, uint16  devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, int reset_flag);
+
+static uint process_nvram_vars(char *varbuf, uint len);
+
+static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+
+static bool dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(struct dhd_bus *bus);
+
+static int dhdsdio_download_code_file(struct dhd_bus *bus, char *image_path);
+static int dhdsdio_download_nvram(struct dhd_bus *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(struct dhd_bus *bus);
+#endif
+
+
+static void
+dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_MEMSIZE;
+	/* Restrict the memsize to user specified limit */
+	DHD_ERROR(("user: Restricting dongle RAM size to %d, minimum accepted %d\n",
+		dhd_dongle_memsize, min_size));
+	if ((dhd_dongle_memsize > min_size) &&
+		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_memsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
+
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+	int err;
+	uint8 clkctl, clkreq, devctl;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(OOB_INTR_ONLY)
+	pendok = FALSE;
+#endif
+	clkctl = 0;
+	sdh = bus->sdh;
+
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+		if ((bus->sih->chip == BCM4329_CHIP_ID) && (bus->sih->chiprev == 0))
+			clkreq |= SBSDIO_FORCE_ALP;
+
+
+
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+		if (pendok &&
+		    ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) {
+			uint32 dummy, retries;
+			R_SDREG(dummy, &bus->regs->clockctlstatus, retries);
+		}
+
+		/* Check current status */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			DHD_INFO(("CLKCTL: set PENDING\n"));
+			bus->clkstate = CLK_PENDING;
+			return BCME_OK;
+		} else if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+			                                    SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+			          !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+		}
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+			           __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+			return BCME_ERROR;
+		}
+
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+		if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+			if (!SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+			}
+#endif /* !defined(BCMLXSDMMC) */
+		} else {
+			if (SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+			}
+		}
+#endif /* defined (DHD_DEBUG) */
+
+		bus->activity = TRUE;
+	} else {
+		clkreq = 0;
+
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		DHD_INFO(("CLKCTL: turned OFF\n"));
+		if (err) {
+			DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+			           __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+	int err;
+	int32 iovalue;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (on) {
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			/* Turn on clock and restore mode */
+			iovalue = 1;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			iovalue = bus->sd_mode;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_mode: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Restore clock speed */
+			iovalue = bus->sd_divisor;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_SDONLY;
+	} else {
+		/* Stop or slow the SD clock itself */
+		if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+			DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+			           __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+			return BCME_ERROR;
+		}
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			if (sd1idle) {
+				/* Change to SD1 mode and turn off clock */
+				iovalue = 1;
+				err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+				                      &iovalue, sizeof(iovalue), TRUE);
+				if (err) {
+					DHD_ERROR(("%s: error changing sd_clock: %d\n",
+					           __FUNCTION__, err));
+					return BCME_ERROR;
+				}
+			}
+
+			iovalue = 0;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Set divisor to idle value */
+			iovalue = bus->idleclock;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_NONE;
+	}
+
+	return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+	int ret = BCME_OK;
+#ifdef DHD_DEBUG
+	uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Early exit if we're already there */
+	if (bus->clkstate == target) {
+		if (target == CLK_AVAIL) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+		}
+		return ret;
+	}
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			dhdsdio_sdclk(bus, TRUE);
+		/* Now request HT Avail on the backplane */
+		ret = dhdsdio_htclk(bus, TRUE, pendok);
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+		}
+		break;
+
+	case CLK_SDONLY:
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			ret = dhdsdio_sdclk(bus, TRUE);
+		else if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		else
+			DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+			           bus->clkstate, target));
+		if (ret == BCME_OK)
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		break;
+
+	case CLK_NONE:
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		/* Now remove the SD clock */
+		ret = dhdsdio_sdclk(bus, FALSE);
+		dhd_os_wd_timer(bus->dhd, 0);
+		break;
+	}
+#ifdef DHD_DEBUG
+	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+	return ret;
+}
+
+int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+	          (sleep ? "SLEEP" : "WAKE"),
+	          (bus->sleeping ? "SLEEP" : "WAKE")));
+
+	/* Done if we're already in the requested state */
+	if (sleep == bus->sleeping)
+		return BCME_OK;
+
+	/* Going to sleep: set the alarm and turn off the lights... */
+	if (sleep) {
+		/* Don't sleep if something is pending */
+		if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+			return BCME_BUSY;
+
+
+		/* Disable SDIO interrupts (no longer interested) */
+		bcmsdh_intr_disable(bus->sdh);
+
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+		/* Turn off our contribution to the HT clock request */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+		/* Isolate the bus */
+		if (bus->sih->chip != BCM4329_CHIP_ID && bus->sih->chip != BCM4319_CHIP_ID) {
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+					SBSDIO_DEVCTL_PADS_ISO, NULL);
+		}
+
+		/* Change state */
+		bus->sleeping = TRUE;
+
+	} else {
+		/* Waking up: bus power up is ok, set local state */
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 0, NULL);
+
+		/* Force pad isolation off if possible (in case power never toggled) */
+		if ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev >= 10))
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+		/* Make sure we have SD bus access */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+		/* Change state */
+		bus->sleeping = FALSE;
+
+		/* Enable interrupts again */
+		if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+			bus->intdis = FALSE;
+			bcmsdh_intr_enable(bus->sdh);
+		}
+	}
+
+	return BCME_OK;
+}
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+	bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (enable == TRUE) {
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+	} else {
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+	}
+
+	/* Turn off our contribution to the HT clock request */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+#define BUS_WAKE(bus) \
+	do { \
+		if ((bus)->sleeping) \
+			dhdsdio_bussleep((bus), FALSE); \
+	} while (0)
+
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int
+dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+{
+	int ret;
+	osl_t *osh;
+	uint8 *frame;
+	uint16 len, pad = 0;
+	uint32 swheader;
+	uint retries = 0;
+	bcmsdh_info_t *sdh;
+	void *new;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	sdh = bus->sdh;
+	osh = bus->dhd->osh;
+
+	if (bus->dhd->dongle_reset) {
+		ret = BCME_NOTREADY;
+		goto done;
+	}
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+
+	/* Add alignment padding, allocate new packet if needed */
+	if ((pad = ((uintptr)frame % DHD_SDALIGN))) {
+		if (PKTHEADROOM(osh, pkt) < pad) {
+			DHD_INFO(("%s: insufficient headroom %d for %d pad\n",
+			          __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad));
+			bus->dhd->tx_realloc++;
+			new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
+			if (!new) {
+				DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+				           __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
+				ret = BCME_NOMEM;
+				goto done;
+			}
+
+			PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
+			bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
+			if (free_pkt)
+				PKTFREE(osh, pkt, TRUE);
+			/* make sure the locally allocated copy is freed when we are done */
+			free_pkt = TRUE;
+			pkt = new;
+			frame = (uint8*)PKTDATA(osh, pkt);
+			ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
+			pad = 0;
+		} else {
+			PKTPUSH(osh, pkt, pad);
+			frame = (uint8*)PKTDATA(osh, pkt);
+
+			ASSERT((pad + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
+			bzero(frame, pad + SDPCM_HDRLEN);
+		}
+	}
+	ASSERT(pad < DHD_SDALIGN);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	len = (uint16)PKTLEN(osh, pkt);
+	*(uint16*)frame = htol16(len);
+	*(((uint16*)frame) + 1) = htol16(~len);
+
+	/* Software tag: channel, sequence number, data offset */
+	swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+	        (((pad + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+	htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
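+	/* Sketch of the header just written above:
+	 *   frame[0..1]: frame length, little-endian         (hardware tag)
+	 *   frame[2..3]: ~frame length, little-endian        (hardware tag check)
+	 *   at SDPCM_FRAMETAG_LEN: swheader -- sequence number, channel, data offset
+	 *   next 32-bit word: second software header word (zero here)
+	 * The payload starts at the data offset encoded in the swheader.
+	 */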
+
+#ifdef DHD_DEBUG
+	tx_packets[PKTPRIO(pkt)]++;
+	if (DHD_BYTES_ON() &&
+	    (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+	      (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+		prhex("Tx Frame", frame, len);
+	} else if (DHD_HDRS_ON()) {
+		prhex("TxHdr", frame, MIN(len, 16));
+	}
+#endif
+
+	/* Raise len to next SDIO block to eliminate tail command */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+#ifdef NOTUSED
+			if (pad <= PKTTAILROOM(osh, pkt))
+#endif /* NOTUSED */
+				len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
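+	/* Illustrative numbers: with blocksize = 512 and len = 600, pad = 424, so
+	 * (when the roundup limit allows it) len grows to 1024 -- two full SDIO
+	 * blocks -- which avoids a separate tail command for the remainder.
+	 */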
+
+	/* Some controllers have trouble with odd bytes -- round to even */
+	if (forcealign && (len & (ALIGNMENT - 1))) {
+#ifdef NOTUSED
+		if (PKTTAILROOM(osh, pkt))
+#endif
+			len = ROUNDUP(len, ALIGNMENT);
+#ifdef NOTUSED
+		else
+			DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
+#endif
+	}
+
+	do {
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                      frame, len, pkt, NULL, NULL);
+		bus->f2txdata++;
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+			          __FUNCTION__, ret));
+			bus->tx_sderrs++;
+
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+			                 SFC_WF_TERM, NULL);
+			bus->f1regdata++;
+
+			for (i = 0; i < 3; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+
+		}
+		if (ret == 0) {
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+		}
+	} while ((ret < 0) && retrydata && retries++ < TXRETRIES);
+
+done:
+	/* restore pkt buffer pointer before calling tx complete routine */
+	PKTPULL(osh, pkt, SDPCM_HDRLEN + pad);
+	dhd_os_sdunlock(bus->dhd);
+	dhd_txcomplete(bus->dhd, pkt, ret != 0);
+	dhd_os_sdlock(bus->dhd);
+
+	if (free_pkt)
+		PKTFREE(osh, pkt, TRUE);
+
+	return ret;
+}
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+	int ret = BCME_ERROR;
+	osl_t *osh;
+	uint datalen, prec;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	osh = bus->dhd->osh;
+	datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+	/* Push the test header if doing loopback */
+	if (bus->ext_loop) {
+		uint8* data;
+		PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+		data = PKTDATA(osh, pkt);
+		*data++ = SDPCM_TEST_ECHOREQ;
+		*data++ = (uint8)bus->loopid++;
+		*data++ = (datalen >> 0);
+		*data++ = (datalen >> 8);
+		datalen += SDPCM_TEST_HDRLEN;
+	}
+#endif /* SDTEST */
+
+	/* Add space for the header */
+	PKTPUSH(osh, pkt, SDPCM_HDRLEN);
+	ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+
+	/* Check for existing queue, current flow-control, pending event, or pending clock */
+	if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+	    (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+	    (bus->clkstate != CLK_AVAIL)) {
+		DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
+			pktq_len(&bus->txq)));
+		bus->fcqueued++;
+
+		/* Priority based enq */
+		dhd_os_sdlock_txq(bus->dhd);
+		if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
+			PKTPULL(osh, pkt, SDPCM_HDRLEN);
+			dhd_txcomplete(bus->dhd, pkt, FALSE);
+			PKTFREE(osh, pkt, TRUE);
+			DHD_ERROR(("%s: out of bus->txq !!!\n", __FUNCTION__));
+			ret = BCME_NORESOURCE;
+		} else {
+			ret = BCME_OK;
+		}
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
+			dhd_txflowcontrol(bus->dhd, 0, ON);
+
+#ifdef DHD_DEBUG
+		if (pktq_plen(&bus->txq, prec) > qcount[prec])
+			qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+		/* Schedule DPC if needed to send queued packet(s) */
+		if (dhd_deferred_tx && !bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+	} else {
+		/* Lock: we're about to use shared data/code (and SDIO) */
+		dhd_os_sdlock(bus->dhd);
+
+		/* Otherwise, send it now */
+		BUS_WAKE(bus);
+		/* Make sure the backplane HT clock is on */
+		dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+#ifndef SDTEST
+		DHD_TRACE(("%s: calling txpkt\n", __FUNCTION__));
+		ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+		ret = dhdsdio_txpkt(bus, pkt,
+		        (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+		if (ret)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+	}
+
+
+	return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+	void *pkt;
+	uint32 intstatus = 0;
+	uint retries = 0;
+	int ret = 0, prec_out;
+	uint cnt = 0;
+	uint datalen;
+	uint8 tx_prec_map;
+
+	dhd_pub_t *dhd = bus->dhd;
+	sdpcmd_regs_t *regs = bus->regs;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	tx_prec_map = ~bus->flowcontrol;
+
+	/* Send frames until the limit or some other event */
+	for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
+		dhd_os_sdlock_txq(bus->dhd);
+		if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
+			dhd_os_sdunlock_txq(bus->dhd);
+			break;
+		}
+		dhd_os_sdunlock_txq(bus->dhd);
+		datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
+
+#ifndef SDTEST
+		ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+		ret = dhdsdio_txpkt(bus, pkt,
+		        (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+		if (ret)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr && cnt)
+		{
+			/* Check device status, signal pending interrupt */
+			R_SDREG(intstatus, &regs->intstatus, retries);
+			bus->f2txdata++;
+			if (bcmsdh_regfail(bus->sdh))
+				break;
+			if (intstatus & bus->hostintmask)
+				bus->ipend = TRUE;
+		}
+	}
+
+	/* Deflow-control stack if needed */
+	if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+	    dhd->txoff && (pktq_len(&bus->txq) < FCLOW))
+		dhd_txflowcontrol(dhd, 0, OFF);
+
+	return cnt;
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	uint8 *frame;
+	uint16 len;
+	uint32 swheader;
+	uint retries = 0;
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint8 doff = 0;
+	int ret = -1;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Back up the pointer to make room for the bus header */
+	frame = msg - SDPCM_HDRLEN;
+	len = (msglen += SDPCM_HDRLEN);
+
+	/* Add alignment padding (optional for ctl frames) */
+	if (dhd_alignctl) {
+		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+			frame -= doff;
+			len += doff;
+			msglen += doff;
+			bzero(frame, doff + SDPCM_HDRLEN);
+		}
+		ASSERT(doff < DHD_SDALIGN);
+	}
+	doff += SDPCM_HDRLEN;
+
+	/* Round send length to next SDIO block */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+			len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (len & (ALIGNMENT - 1)))
+		len = ROUNDUP(len, ALIGNMENT);
+
+	ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+	/* Need to lock here to protect txseq and SDIO tx calls */
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	*(uint16*)frame = htol16((uint16)msglen);
+	*(((uint16*)frame) + 1) = htol16(~msglen);
+
+	/* Software tag: channel, sequence number, data offset */
+	swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+	        | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+	htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+	if (!DATAOK(bus)) {
+		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+			__FUNCTION__, bus->tx_max, bus->tx_seq));
+		bus->ctrl_frame_stat = TRUE;
+		/* Send from dpc */
+		bus->ctrl_frame_buf = frame;
+		bus->ctrl_frame_len = len;
+
+		dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+
+		if (bus->ctrl_frame_stat == FALSE) {
+			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+			ret = 0;
+		} else {
+			DHD_INFO(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__));
+			ret = -1;
+		}
+	}
+
+	if (ret == -1) {
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+			prhex("Tx Frame", frame, len);
+		} else if (DHD_HDRS_ON()) {
+			prhex("TxHdr", frame, MIN(len, 16));
+		}
+#endif
+
+		do {
+			bus->ctrl_frame_stat = FALSE;
+			ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+			                          frame, len, NULL, NULL, NULL);
+			ASSERT(ret != BCME_PENDING);
+
+			if (ret < 0) {
+				/* On failure, abort the command and terminate the frame */
+				DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+				          __FUNCTION__, ret));
+				bus->tx_sderrs++;
+
+				bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+				                 SFC_WF_TERM, NULL);
+				bus->f1regdata++;
+
+				for (i = 0; i < 3; i++) {
+					uint8 hi, lo;
+					hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					                     SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+					lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					                     SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+					bus->f1regdata += 2;
+					if ((hi == 0) && (lo == 0))
+						break;
+				}
+
+			}
+			if (ret == 0) {
+				bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+			}
+		} while ((ret < 0) && retries++ < TXRETRIES);
+	}
+
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	if (ret)
+		bus->dhd->tx_ctlerrs++;
+	else
+		bus->dhd->tx_ctlpkts++;
+
+	return ret ? -EIO : 0;
+}
+
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+	dhd_os_sdlock(bus->dhd);
+	rxlen = bus->rxlen;
+	bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+	bus->rxlen = 0;
+	dhd_os_sdunlock(bus->dhd);
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+		         __FUNCTION__, rxlen, msglen));
+	} else if (timeleft == 0) {
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef DHD_DEBUG_TRAP
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG_TRAP */
+	} else if (pending == TRUE) {
+		DHD_CTL(("%s: cancelled\n", __FUNCTION__));
+		return -ERESTARTSYS;
+	} else {
+		DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG_TRAP
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG_TRAP */
+	}
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	return rxlen ? (int)rxlen : -ETIMEDOUT;
+}
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_POLLRATE,
+	IOV_SDREG,
+	IOV_SBREG,
+	IOV_SDCIS,
+	IOV_MEMBYTES,
+	IOV_MEMSIZE,
+#ifdef DHD_DEBUG_TRAP
+	IOV_CHECKDIED,
+#endif
+	IOV_DOWNLOAD,
+	IOV_FORCEEVEN,
+	IOV_SDIOD_DRIVE,
+	IOV_READAHEAD,
+	IOV_SDRXCHAIN,
+	IOV_ALIGNCTL,
+	IOV_SDALIGN,
+	IOV_DEVRESET,
+	IOV_CPU,
+#ifdef SDTEST
+	IOV_PKTGEN,
+	IOV_EXTLOOP,
+#endif /* SDTEST */
+	IOV_SPROM,
+	IOV_TXBOUND,
+	IOV_RXBOUND,
+	IOV_TXMINMAX,
+	IOV_IDLETIME,
+	IOV_IDLECLOCK,
+	IOV_SD1IDLE,
+	IOV_SLEEP,
+	IOV_VARS
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+	{"intr",	IOV_INTR,	0,	IOVT_BOOL,	0 },
+	{"sleep",	IOV_SLEEP,	0,	IOVT_BOOL,	0 },
+	{"pollrate",	IOV_POLLRATE,	0,	IOVT_UINT32,	0 },
+	{"idletime",	IOV_IDLETIME,	0,	IOVT_INT32,	0 },
+	{"idleclock",	IOV_IDLECLOCK,	0,	IOVT_INT32,	0 },
+	{"sd1idle",	IOV_SD1IDLE,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"memsize",	IOV_MEMSIZE,	0,	IOVT_UINT32,	0 },
+	{"download",	IOV_DOWNLOAD,	0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"sdiod_drive",	IOV_SDIOD_DRIVE, 0,	IOVT_UINT32,	0 },
+	{"readahead",	IOV_READAHEAD,	0,	IOVT_BOOL,	0 },
+	{"sdrxchain",	IOV_SDRXCHAIN,	0,	IOVT_BOOL,	0 },
+	{"alignctl",	IOV_ALIGNCTL,	0,	IOVT_BOOL,	0 },
+	{"sdalign",	IOV_SDALIGN,	0,	IOVT_BOOL,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"sdreg",	IOV_SDREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sbreg",	IOV_SBREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_cis",	IOV_SDCIS,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"forcealign",	IOV_FORCEEVEN,	0,	IOVT_BOOL,	0 },
+	{"txbound",	IOV_TXBOUND,	0,	IOVT_UINT32,	0 },
+	{"rxbound",	IOV_RXBOUND,	0,	IOVT_UINT32,	0 },
+	{"txminmax", IOV_TXMINMAX,	0,	IOVT_UINT32,	0 },
+	{"cpu",		IOV_CPU,	0,	IOVT_BOOL,	0 },
+#endif /* DHD_DEBUG */
+#ifdef DHD_DEBUG_TRAP
+	{"checkdied",	IOV_CHECKDIED,	0,	IOVT_BUFFER,	0 },
+#endif /* DHD_DEBUG_TRAP  */
+#ifdef SDTEST
+	{"extloop",	IOV_EXTLOOP,	0,	IOVT_BOOL,	0 },
+	{"pktgen",	IOV_PKTGEN,	0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+
+	{NULL, 0, 0, 0, 0 }
+};
+
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+	uint q1, q2;
+
+	if (!div) {
+		bcm_bprintf(strbuf, "%s N/A", desc);
+	} else {
+		q1 = num / div;
+		q2 = (100 * (num - (q1 * div))) / div;
+		bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+	}
+}
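+
+/* Worked example (illustrative): dhd_dump_pct(strbuf, "pkts/int", 3, 7) prints
+ * "pkts/int 0.42", since q1 = 3 / 7 = 0 and q2 = (100 * 3) / 7 = 42; a zero
+ * divisor prints "pkts/int N/A" instead.
+ */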
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+	bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+	            bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+	bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
+	            bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+	            bus->rxlen, bus->rx_seq);
+	bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
+	            bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+	bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
+	            bus->pollrate, bus->pollcnt, bus->regfails);
+
+	bcm_bprintf(strbuf, "\nAdditional counters:\n");
+	bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
+	            bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+	            bus->rxc_errors);
+	bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
+	            bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+	bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
+	            bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+	bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
+	            bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+	bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n",
+	            (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+	            bus->f2txdata, bus->f1regdata);
+	{
+		dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+		             bus->dhd->rx_packets);
+		dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+		             (bus->f2txdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+		bcm_bprintf(strbuf, "\n\n");
+	}
+
+#ifdef SDTEST
+	if (bus->pktgen_count) {
+		bcm_bprintf(strbuf, "pktgen config and count:\n");
+		bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d len %d\n",
+		            bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+		            bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+		bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
+		            bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+	}
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+	bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+	            bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+	bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+	bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+	            bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+	bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+	bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+	bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+	bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+	bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+
+	pktgen.version = DHD_PKTGEN_VERSION;
+	pktgen.freq = bus->pktgen_freq;
+	pktgen.count = bus->pktgen_count;
+	pktgen.print = bus->pktgen_print;
+	pktgen.total = bus->pktgen_total;
+	pktgen.minlen = bus->pktgen_minlen;
+	pktgen.maxlen = bus->pktgen_maxlen;
+	pktgen.numsent = bus->pktgen_sent;
+	pktgen.numrcvd = bus->pktgen_rcvd;
+	pktgen.numfail = bus->pktgen_fail;
+	pktgen.mode = bus->pktgen_mode;
+	pktgen.stop = bus->pktgen_stop;
+
+	bcopy(&pktgen, arg, sizeof(pktgen));
+
+	return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+	uint oldcnt, oldmode;
+
+	bcopy(arg, &pktgen, sizeof(pktgen));
+	if (pktgen.version != DHD_PKTGEN_VERSION)
+		return BCME_BADARG;
+
+	oldcnt = bus->pktgen_count;
+	oldmode = bus->pktgen_mode;
+
+	bus->pktgen_freq = pktgen.freq;
+	bus->pktgen_count = pktgen.count;
+	bus->pktgen_print = pktgen.print;
+	bus->pktgen_total = pktgen.total;
+	bus->pktgen_minlen = pktgen.minlen;
+	bus->pktgen_maxlen = pktgen.maxlen;
+	bus->pktgen_mode = pktgen.mode;
+	bus->pktgen_stop = pktgen.stop;
+
+	bus->pktgen_tick = bus->pktgen_ptick = 0;
+	bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+	bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+	/* Clear counts for a new pktgen (mode change, or was stopped) */
+	if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode))
+		bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0;
+
+	return 0;
+}
+#endif /* SDTEST */
+
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint32 sdaddr;
+	uint dsize;
+
+	/* Determine initial transfer parameters */
+	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+	else
+		dsize = size;
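+	/* The backplane is reached through a fixed-size window (bounded by
+	 * SBSDIO_SB_OFT_ADDR_MASK); if the requested range crosses the window
+	 * boundary, the first chunk is clipped at SBSDIO_SB_OFT_ADDR_LIMIT and the
+	 * loop below moves the window and resumes from offset 0.
+	 */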
+
+	/* Set the backplane window to include the start address */
+	if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+		DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+		goto xfer_done;
+	}
+
+	/* Do the transfer(s) */
+	while (size) {
+		DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+		          __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+		          (address & SBSDIO_SBWINDOW_MASK)));
+		if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+			DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* Adjust for next transfer (if any) */
+		if ((size -= dsize)) {
+			data += dsize;
+			address += dsize;
+			if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+				DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+				break;
+			}
+			sdaddr = 0;
+			dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+		}
+	}
+
+xfer_done:
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+		DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+			bcmsdh_cur_sbwad(bus->sdh)));
+	}
+
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG_TRAP
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+	uint32 addr;
+	int rv;
+
+	/* Read last word in memory to determine address of sdpcm_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, bus->ramsize - 4, (uint8 *)&addr, 4)) < 0)
+		return rv;
+
+	addr = ltoh32(addr);
+
+	DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+	/*
+	 * Check if addr is valid.
+	 * NVRAM length at the end of memory should have been overwritten.
+	 */
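+	/* The rejected pattern is one whose upper 16 bits are the one's complement
+	 * of its lower 16 bits, i.e. a value that still looks like the NVRAM length
+	 * token rather than a pointer. Illustrative case: 0xFBFF0400 fails, since
+	 * (~0xFBFF0400 >> 16) & 0xffff and 0xFBFF0400 & 0xffff are both 0x0400.
+	 */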
+	if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+		DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", __FUNCTION__, addr));
+		return BCME_ERROR;
+	}
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+		return rv;
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+		DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+		           "is different from sdpcm_shared version %d in dongle\n",
+		           __FUNCTION__, SDPCM_SHARED_VERSION,
+		           sh->flags & SDPCM_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	trap_t tr;
+	sdpcm_shared_t sdpcm_shared;
+	struct bcmstrbuf strbuf;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout, so "data" is NULL;
+		 * allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+	if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (sdpcm_shared.assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (sdpcm_shared.assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+		}
+
+		if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+			if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+			                                 sdpcm_shared.trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+			"lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n",
+			tr.type, tr.epc, tr.cpsr, tr.spsr, tr.r13, tr.r14, tr.pc,
+			sdpcm_shared.trap_addr,
+			tr.r0, tr.r1, tr.r2, tr.r3, tr.r4, tr.r5, tr.r6, tr.r7);
+		}
+	}
+
+	if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+
+	return bcmerror;
+}
+#endif /* DHD_DEBUG_TRAP */
+
+#ifdef DHD_DEBUG
+#define CONSOLE_LINE_MAX	192
+
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return 0;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+#endif /* DHD_DEBUG */
+
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+	/* Some ioctls use the bus */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	/* Handle sleep stuff before any clock mucking */
+	if (vi->varid == IOV_SLEEP) {
+		if (IOV_ISSET(actionid)) {
+			bcmerror = dhdsdio_bussleep(bus, bool_val);
+		} else {
+			int_val = (int32)bus->sleeping;
+			bcopy(&int_val, arg, val_size);
+		}
+		goto exit;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	if (!bus->dhd->dongle_reset) {
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	}
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_INTR):
+		int_val = (int32)bus->intr;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_INTR):
+		bus->intr = bool_val;
+		bus->intdis = FALSE;
+		if (bus->dhd->up) {
+			if (bus->intr) {
+				DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+				bcmsdh_intr_enable(bus->sdh);
+			} else {
+				DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+				bcmsdh_intr_disable(bus->sdh);
+			}
+		}
+		break;
+
+	case IOV_GVAL(IOV_POLLRATE):
+		int_val = (int32)bus->pollrate;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POLLRATE):
+		bus->pollrate = (uint)int_val;
+		bus->poll = (bus->pollrate != 0);
+		break;
+
+	case IOV_GVAL(IOV_IDLETIME):
+		int_val = bus->idletime;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLETIME):
+		if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->idletime = int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_IDLECLOCK):
+		int_val = (int32)bus->idleclock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLECLOCK):
+		bus->idleclock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SD1IDLE):
+		int_val = (int32)sd1idle;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SD1IDLE):
+		sd1idle = bool_val;
+		break;
+
+
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+		          (set ? "write" : "read"), size, address));
+
+		/* If we know about SOCRAM, check for a fit */
+		if ((bus->orig_ramsize) &&
+		    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) {
+			DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+			           __FUNCTION__, bus->orig_ramsize, size, address));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int) : (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_MEMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_SDIOD_DRIVE):
+		int_val = (int32)dhd_sdiod_drive_strength;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDIOD_DRIVE):
+		dhd_sdiod_drive_strength = int_val;
+		si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+		break;
+
+	case IOV_SVAL(IOV_DOWNLOAD):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdsdio_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_GVAL(IOV_READAHEAD):
+		int_val = (int32)dhd_readahead;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_READAHEAD):
+		if (bool_val && !dhd_readahead)
+			bus->nextlen = 0;
+		dhd_readahead = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDRXCHAIN):
+		int_val = (int32)bus->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDRXCHAIN):
+		if (bool_val && !bus->sd_rxchain)
+			bcmerror = BCME_UNSUPPORTED;
+		else
+			bus->use_rxchain = bool_val;
+		break;
+	case IOV_GVAL(IOV_ALIGNCTL):
+		int_val = (int32)dhd_alignctl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_ALIGNCTL):
+		dhd_alignctl = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDALIGN):
+		int_val = DHD_SDALIGN;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_VARS):
+		if (bus->varsz < (uint)len)
+			bcopy(bus->vars, arg, bus->varsz);
+		else
+			bcmerror = BCME_BUFTOOSHORT;
+		break;
+
+	case IOV_GVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uintptr)bus->regs + sd_ptr->offset;
+		size = sd_ptr->func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uintptr)bus->regs + sd_ptr->offset;
+		size = sd_ptr->func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	/* Same as above, but the offset is relative to the backplane (SI_ENUM_BASE), not the SDIO core */
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	case IOV_GVAL(IOV_SDCIS):
+	{
+		*(char *)arg = 0;
+
+		bcmstrcat(arg, "\nFunc 0\n");
+		bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 1\n");
+		bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 2\n");
+		bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		break;
+	}
+
+	case IOV_GVAL(IOV_FORCEEVEN):
+		int_val = (int32)forcealign;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FORCEEVEN):
+		forcealign = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_TXBOUND):
+		int_val = (int32)dhd_txbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXBOUND):
+		dhd_txbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_RXBOUND):
+		int_val = (int32)dhd_rxbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RXBOUND):
+		dhd_rxbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_TXMINMAX):
+		int_val = (int32)dhd_txminmax;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXMINMAX):
+		dhd_txminmax = (uint)int_val;
+		break;
+
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+	case IOV_GVAL(IOV_EXTLOOP):
+		int_val = (int32)bus->ext_loop;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_EXTLOOP):
+		bus->ext_loop = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_get(bus, arg);
+		break;
+
+	case IOV_SVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_set(bus, arg);
+		break;
+#endif /* SDTEST */
+
+
+	case IOV_SVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+		           __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+		           bus->dhd->busstate));
+
+		ASSERT(bus->dhd->osh);
+		/* ASSERT(bus->cl_devid); */
+
+		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+		break;
+
+	case IOV_GVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+		/* Get its status */
+		int_val = (bool) bus->dhd->dongle_reset;
+		bcopy(&int_val, arg, val_size);
+
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
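+	/* A DEVRESET set to FALSE brings the dongle back up; re-apply the default ioctls afterwards */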
+	if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == FALSE)
+		dhd_preinit_ioctls((dhd_pub_t *) bus->dhd);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	char *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	if (bus->vars) {
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+
+		/* Write the vars list */
+		bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (char*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray)
+			return BCME_NOMEM;
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+			           __FUNCTION__, bcmerror, varsize, varaddr));
+		}
+		/* Compare the original NVRAM with the copy read back from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else {
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			           __FUNCTION__));
+		}
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		bus->orig_ramsize, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((bus->orig_ramsize - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		varsizew = htol32(varsizew);
+	}
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdsdio_membytes(bus, TRUE, (bus->orig_ramsize - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+	uint retries;
+	int bcmerror = 0;
+
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+
+		bus->alp_only = TRUE;
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		si_core_disable(bus->sih, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset SOCRAM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Clear the last word of memory */
+		if (bus->ramsize) {
+			uint32 zeros = 0;
+			dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4);
+		}
+	} else {
+		if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		if (!si_iscoreup(bus->sih)) {
+			DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		if ((bcmerror = dhdsdio_write_vars(bus))) {
+			DHD_ERROR(("%s: no vars written to RAM\n", __FUNCTION__));
+			bcmerror = 0;
+		}
+
+		if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+		    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+			DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+		W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to SDIOD core */
+	if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+		si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+	return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+		dhd_os_sdlock(bus->dhd);
+
+		BUS_WAKE(bus);
+
+		/* Turn on clock in case SD command needs backplane */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+		/* Check for bus configuration changes of interest */
+
+		/* If it was divisor change, read the new one */
+		if (set && strcmp(name, "sd_divisor") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_divisor = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_divisor));
+			}
+		}
+		/* If it was a mode change, read the new one */
+		if (set && strcmp(name, "sd_mode") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_mode = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_mode));
+			}
+		}
+		/* Similar check for blocksize change */
+		if (set && strcmp(name, "sd_blocksize") == 0) {
+			int32 fnum = 2;
+			if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+			                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, "sd_blocksize", bus->blocksize));
+			}
+		}
+		bus->roundup = MIN(max_roundup, bus->blocksize);
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;
+	int err;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Change our idea of bus state */
+	bus->dhd->busstate = DHD_BUS_DOWN;
+
+	/* Enable clock for device interrupts */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Disable and clear interrupts at the chip level also */
+	W_SDREG(0, &bus->regs->hostintmask, retries);
+	local_hostintmask = bus->hostintmask;
+	bus->hostintmask = 0;
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+	}
+
+	/* Turn off the bus (F2), free any pending packets */
+	DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	bcmsdh_intr_disable(bus->sdh);
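+	/* Leave only F1 enabled; clearing the F2 enable bit stops frame transfers */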
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	/* Clear any pending interrupts now that F2 is disabled */
+	W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+
+	/* Turn off the backplane clock (only) */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	/* Clear the data packet queues */
+	pktq_flush(osh, &bus->txq, TRUE);
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = BCME_ERROR;
+	uint8 saveclk;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return BCME_OK;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	err = dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if ((err != BCME_OK) || (bus->clkstate != CLK_AVAIL)) {
+		DHD_ERROR(("%s: Failed to set backplane clock: err %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+	        &bus->regs->tosbmailboxdata, retries);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+	/* Give the dongle some time to do its thing and set IOR2 */
+	dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+	ready = 0;
+	while (ready != enable && !dhd_timeout_expired(&tmo))
+	        ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+
+	DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+	          __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (ready == enable) {
+		/* Make sure we're talking to the core. */
+		if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+			bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err);
+
+		/* Set bus state according to enable result */
+		dhdp->busstate = DHD_BUS_DATA;
+
+		/* bcmsdh_intr_unmask(bus->sdh); */
+
+		bus->intdis = FALSE;
+		if (bus->intr) {
+			DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			bcmsdh_intr_enable(bus->sdh);
+		} else {
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+		}
+	} else {
+		/* Disable F2 again */
+		enable = SDIO_FUNC_ENABLE_1;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+	}
+
+	/* Restore previous clock setting */
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+
+	/* If we didn't come up, turn off backplane clock */
+	if (dhdp->busstate != DHD_BUS_DATA)
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+	ret = BCME_OK;
+exit:
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+
+	return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+	uint16 lastrbc;
+	uint8 hi, lo;
+	int err;
+
+	DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+	           (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+	if (abort) {
+		bcmsdh_abort(sdh, SDIO_FUNC_2);
+	}
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+	bus->f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+		lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+		bus->f1regdata += 2;
+
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+			           __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries) {
+		DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+	} else {
+		DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+	}
+
+	if (rtx) {
+		bus->rxrtx++;
+		W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+		bus->f1regdata++;
+		if (retries <= retry_limit) {
+			bus->rxskip = TRUE;
+		}
+	}
+
+	/* Clear partial in any case */
+	bus->nextlen = 0;
+
+	/* If we can't reach the device, signal failure */
+	if (err || bcmsdh_regfail(sdh))
+		bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint rdlen, pad;
+
+	int sdret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Control data already received in aligned rxctl */
+	if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+		goto gotpkt;
+
+	ASSERT(bus->rxbuf);
+	/* Set rxctl for frame (w/optional alignment) */
+	bus->rxctl = bus->rxbuf;
+	if (dhd_alignctl) {
+		bus->rxctl += firstread;
+		if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+			bus->rxctl += (DHD_SDALIGN - pad);
+		bus->rxctl -= firstread;
+	}
+	ASSERT(bus->rxctl >= bus->rxbuf);
+
+	/* Copy the already-read portion over */
+	bcopy(hdr, bus->rxctl, firstread);
+	if (len <= firstread)
+		goto gotpkt;
+
+	/* Copy the full data pkt in gSPI case and process ioctl. */
+	if (bus->bus == SPI_BUS) {
+		bcopy(hdr, bus->rxctl, len);
+		goto gotpkt;
+	}
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - firstread;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->dhd->maxctl))
+			rdlen += pad;
+	} else if (rdlen % DHD_SDALIGN) {
+		rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (rdlen & (ALIGNMENT - 1)))
+		rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + firstread) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+		           __FUNCTION__, rdlen, bus->dhd->maxctl));
+		bus->dhd->rx_errors++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	if ((len - doff) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+		           __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+		bus->dhd->rx_errors++; bus->rx_toolong++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+
+	/* Read remainder of frame body into the rxctl buffer */
+	sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+	                            (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+	bus->f2rxdata++;
+	ASSERT(sdret != BCME_PENDING);
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+		bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+		dhdsdio_rxfail(bus, TRUE, TRUE);
+		goto done;
+	}
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+	if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+		prhex("RxCtrl", bus->rxctl, len);
+	}
+#endif
+
+	/* Point to valid data and indicate its length */
+	bus->rxctl += doff;
+	bus->rxlen = len - doff;
+
+done:
+	/* Awake any waiters */
+	dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+	uint16 dlen, totlen;
+	uint8 *dptr, num = 0;
+
+	uint16 sublen, check;
+	void *pfirst, *plast, *pnext, *save_pfirst;
+	osl_t *osh = bus->dhd->osh;
+
+	int errcode;
+	uint8 chan, seq, doff, sfdoff;
+	uint8 txmax;
+
+	int ifidx = 0;
+	bool usechain = bus->use_rxchain;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		dhd_os_sdlock_rxq(bus->dhd);
+
+		pfirst = plast = pnext = NULL;
+		dlen = (uint16)PKTLEN(osh, bus->glomd);
+		dptr = PKTDATA(osh, bus->glomd);
+		if (!dlen || (dlen & 1)) {
+			DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+			           __FUNCTION__, dlen));
+			dlen = 0;
+		}
+
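+		/* The glom descriptor is a list of little-endian 16-bit subframe lengths */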
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = ltoh16_ua(dptr);
+			dlen -= sizeof(uint16);
+			dptr += sizeof(uint16);
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+				           __FUNCTION__, num, sublen));
+				pnext = NULL;
+				break;
+			}
+			if (sublen % DHD_SDALIGN) {
+				DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+				           __FUNCTION__, sublen, DHD_SDALIGN));
+				usechain = FALSE;
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total is a block multiple */
+			if (!dlen) {
+				sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+				totlen = ROUNDUP(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				           __FUNCTION__, num, sublen));
+				break;
+			}
+			ASSERT(!PKTLINK(pnext));
+			if (!pfirst) {
+				ASSERT(!plast);
+				pfirst = plast = pnext;
+			} else {
+				ASSERT(plast);
+				PKTSETNEXT(osh, plast, pnext);
+				plast = pnext;
+			}
+
+			/* Adhere to start alignment requirements */
+			PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+		}
+
+		/* If all allocations succeeded, save packet chain in bus structure */
+		if (pnext) {
+			DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+			          __FUNCTION__, totlen, num));
+			if (DHD_GLOM_ON() && bus->nextlen) {
+				if (totlen != bus->nextlen) {
+					DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+					          "rxseq %d\n", __FUNCTION__, bus->nextlen,
+					          totlen, rxseq));
+				}
+			}
+			bus->glom = pfirst;
+			pfirst = pnext = NULL;
+		} else {
+			if (pfirst)
+				PKTFREE(osh, pfirst, FALSE);
+			bus->glom = NULL;
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		PKTFREE(osh, bus->glomd, FALSE);
+		bus->glomd = NULL;
+		bus->nextlen = 0;
+
+		dhd_os_sdunlock_rxq(bus->dhd);
+	}
+
+	/* Ok -- either we just generated a packet chain, or had one from before */
+	if (bus->glom) {
+		if (DHD_GLOM_ON()) {
+			DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+			for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
+				          pnext, (uint8*)PKTDATA(osh, pnext),
+				          PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+			}
+		}
+
+		pfirst = bus->glom;
+		dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  Configurable iovar to
+		 * read directly into the chained packet, or allocate a large
+		 * packet and copy into the chain.
+		 */
+		if (usechain) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+			                              dlen, pfirst, NULL, NULL);
+		} else if (bus->dataptr) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, bus->dataptr,
+			                              dlen, NULL, NULL, NULL);
+			sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+			if (sublen != dlen) {
+				DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+				           __FUNCTION__, dlen, sublen));
+				errcode = -1;
+			}
+			pnext = NULL;
+		} else {
+			DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+			errcode = -1;
+		}
+		bus->f2rxdata++;
+		ASSERT(errcode != BCME_PENDING);
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+			           __FUNCTION__, dlen, errcode));
+			bus->dhd->rx_errors++;
+
+			if (bus->glomerr++ < 3) {
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			return 0;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_GLOM_ON()) {
+			prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+			      MIN(PKTLEN(osh, pfirst), 48));
+		}
+#endif
+
+
+		/* Validate the superframe header */
+		dptr = (uint8 *)PKTDATA(osh, pfirst);
+		sublen = ltoh16_ua(dptr);
+		check = ltoh16_ua(dptr + sizeof(uint16));
+
+		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
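+		/* The readahead length from the SW header is in 16-byte units */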
+		bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+		errcode = 0;
+		if ((uint16)~(sublen^check)) {
+			DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, sublen, check));
+			errcode = -1;
+		} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+			DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+			           __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+			errcode = -1;
+		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+			DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+			           SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+			errcode = -1;
+		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+			DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+			errcode = -1;
+		} else if ((doff < SDPCM_HDRLEN) ||
+		           (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+			           __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN));
+			errcode = -1;
+		}
+
+		/* Check sequence number of superframe SW header */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+			          __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
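+		/* txmax is the highest tx sequence the dongle will currently accept;
+		 * clamp values implausibly far ahead of our tx_seq.
+		 */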
+		if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_seq + 2;
+		}
+		bus->tx_max = txmax;
+
+		/* Remove superframe header, remember offset */
+		PKTPULL(osh, pfirst, doff);
+		sfdoff = doff;
+
+		/* Validate all the subframe headers */
+		for (num = 0, pnext = pfirst; pnext && !errcode;
+		     num++, pnext = PKTNEXT(osh, pnext)) {
+			dptr = (uint8 *)PKTDATA(osh, pnext);
+			dlen = (uint16)PKTLEN(osh, pnext);
+			sublen = ltoh16_ua(dptr);
+			check = ltoh16_ua(dptr + sizeof(uint16));
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				prhex("subframe", dptr, 32);
+			}
+#endif
+
+			if ((uint16)~(sublen^check)) {
+				DHD_ERROR(("%s (subframe %d): HW hdr error: "
+				           "len/check 0x%04x/0x%04x\n",
+				           __FUNCTION__, num, sublen, check));
+				errcode = -1;
+			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+				DHD_ERROR(("%s (subframe %d): length mismatch: "
+				           "len 0x%04x, expect 0x%04x\n",
+				           __FUNCTION__, num, sublen, dlen));
+				errcode = -1;
+			} else if ((chan != SDPCM_DATA_CHANNEL) &&
+			           (chan != SDPCM_EVENT_CHANNEL)) {
+				DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+				           __FUNCTION__, num, chan));
+				errcode = -1;
+			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+				DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+				           __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+				errcode = -1;
+			}
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request a couple retries */
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				PKTPUSH(osh, pfirst, sfdoff);
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			bus->nextlen = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+		save_pfirst = pfirst;
+		bus->glom = NULL;
+		plast = NULL;
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+			pnext = PKTNEXT(osh, pfirst);
+			PKTSETNEXT(osh, pfirst, NULL);
+
+			dptr = (uint8 *)PKTDATA(osh, pfirst);
+			sublen = ltoh16_ua(dptr);
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+			          __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+			          PKTLEN(osh, pfirst), sublen, chan, seq));
+
+			ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+			if (rxseq != seq) {
+				DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Subframe Data", dptr, dlen);
+			}
+#endif
+
+			PKTSETLEN(osh, pfirst, sublen);
+			PKTPULL(osh, pfirst, doff);
+
+			if (PKTLEN(osh, pfirst) == 0) {
+				PKTFREE(bus->dhd->osh, pfirst, FALSE);
+				if (plast) {
+					PKTSETNEXT(osh, plast, pnext);
+				} else {
+					ASSERT(save_pfirst == pfirst);
+					save_pfirst = pnext;
+				}
+				continue;
+			} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst) != 0) {
+				DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+				bus->dhd->rx_errors++;
+				PKTFREE(osh, pfirst, FALSE);
+				if (plast) {
+					PKTSETNEXT(osh, plast, pnext);
+				} else {
+					ASSERT(save_pfirst == pfirst);
+					save_pfirst = pnext;
+				}
+				continue;
+			}
+
+			/* this packet will go up, link back into chain and count it */
+			PKTSETNEXT(osh, pfirst, pnext);
+			plast = pfirst;
+			num++;
+
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+				          __FUNCTION__, num, pfirst,
+				          PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+				          PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+				prhex("", (uint8 *)PKTDATA(osh, pfirst),
+				      MIN(PKTLEN(osh, pfirst), 32));
+			}
+#endif /* DHD_DEBUG */
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+		if (num) {
+			dhd_os_sdunlock(bus->dhd);
+			dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num);
+			dhd_os_sdlock(bus->dhd);
+		}
+
+		bus->rxglomframes++;
+		bus->rxglompkts += num;
+	}
+	return num;
+}
+
+/* Return TRUE if there may be more frames to read */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+	osl_t *osh = bus->dhd->osh;
+	bcmsdh_info_t *sdh = bus->sdh;
+
+	uint16 len, check;	/* Extracted hardware header fields */
+	uint8 chan, seq, doff;	/* Extracted software header fields */
+	uint8 fcbits;		/* Extracted fcbits from software header */
+	uint8 delta;
+
+	void *pkt;	/* Packet for event or data frames */
+	uint16 pad;	/* Number of pad bytes to read */
+	uint16 rdlen;	/* Total number of bytes to read */
+	uint8 rxseq;	/* Next sequence number to expect */
+	uint rxleft = 0;	/* Remaining number of frames allowed */
+	int sdret;	/* Return code from bcmsdh calls */
+	uint8 txmax;	/* Maximum tx sequence offered */
+	bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+	uint8 *rxbuf;
+	int ifidx = 0;
+	uint rxcount = 0; /* Total frames read */
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+	bool sdtest = FALSE;	/* To limit message spew from test mode */
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(maxframes);
+
+#ifdef SDTEST
+	/* Allow pktgen to override maxframes */
+	if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+		maxframes = bus->pktgen_count;
+		sdtest = TRUE;
+	}
+#endif
+
+	/* Not finished unless we encounter no more frames indication */
+	*finished = FALSE;
+
+
+	for (rxseq = bus->rx_seq, rxleft = maxframes;
+	     !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+	     rxseq++, rxleft--) {
+
+		/* Handle glomming separately */
+		if (bus->glom || bus->glomd) {
+			uint8 cnt;
+			DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+			          __FUNCTION__, bus->glomd, bus->glom));
+			cnt = dhdsdio_rxglom(bus, rxseq);
+			DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+			rxseq += cnt - 1;
+			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+			continue;
+		}
+
+		/* Try doing single read if we can */
+		if (dhd_readahead && bus->nextlen) {
+			uint16 nextlen = bus->nextlen;
+			bus->nextlen = 0;
+
+			if (bus->bus == SPI_BUS) {
+				rdlen = len = nextlen;
+			}
+			else {
+				rdlen = len = nextlen << 4;
+
+				/* Pad read to blocksize for efficiency */
+				if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+					pad = bus->blocksize - (rdlen % bus->blocksize);
+					if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+						((rdlen + pad + firstread) < MAX_RX_DATASZ))
+						rdlen += pad;
+				} else if (rdlen % DHD_SDALIGN) {
+					rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+				}
+			}
+
+			/* We use the bus->rxctl buffer in WinXP for initial control pkt receives.
+			 * Later we use buffer-poll for data as well as control packets.
+			 * This is required because dhd receives the full frame over gSPI, unlike
+			 * SDIO.  After the frame is received we have to distinguish whether it
+			 * is a data or non-data frame.
+			 */
+			/* Allocate a packet buffer */
+			dhd_os_sdlock_rxq(bus->dhd);
+			if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+				if (bus->bus == SPI_BUS) {
+					bus->usebufpool = FALSE;
+					bus->rxctl = bus->rxbuf;
+					if (dhd_alignctl) {
+						bus->rxctl += firstread;
+						if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+							bus->rxctl += (DHD_SDALIGN - pad);
+						bus->rxctl -= firstread;
+					}
+					ASSERT(bus->rxctl >= bus->rxbuf);
+					rxbuf = bus->rxctl;
+					/* Read the entire frame */
+					sdret = dhd_bcmsdh_recv_buf(bus,
+					                            bcmsdh_cur_sbwad(sdh),
+					                            SDIO_FUNC_2,
+					                            F2SYNC, rxbuf, rdlen,
+					                            NULL, NULL, NULL);
+					bus->f2rxdata++;
+					ASSERT(sdret != BCME_PENDING);
+
+
+					/* Control frame failures need retransmission */
+					if (sdret < 0) {
+						DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+						   __FUNCTION__, rdlen, sdret));
+						/* dhd.rx_ctlerrs is higher level */
+						bus->rxc_errors++;
+						dhd_os_sdunlock_rxq(bus->dhd);
+						dhdsdio_rxfail(bus, TRUE,
+						    (bus->bus == SPI_BUS) ? FALSE : TRUE);
+						continue;
+					}
+				} else {
+					/* Give up on data, request rtx of events */
+					DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+					           "expected rxseq %d\n",
+					           __FUNCTION__, len, rdlen, rxseq));
+					/* Just go try again w/normal header read */
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			} else {
+				if (bus->bus == SPI_BUS)
+					bus->usebufpool = TRUE;
+
+				ASSERT(!PKTLINK(pkt));
+				PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+				rxbuf = (uint8 *)PKTDATA(osh, pkt);
+				/* Read the entire frame */
+				sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+				                            SDIO_FUNC_2,
+				                            F2SYNC, rxbuf, rdlen,
+				                            pkt, NULL, NULL);
+				bus->f2rxdata++;
+				ASSERT(sdret != BCME_PENDING);
+
+				if (sdret < 0) {
+					DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+					   __FUNCTION__, rdlen, sdret));
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+					bus->dhd->rx_errors++;
+					dhd_os_sdunlock_rxq(bus->dhd);
+					/* Force retry w/normal header read.  Don't attempt NAK for
+					 * gSPI
+					 */
+					dhdsdio_rxfail(bus, TRUE,
+					      (bus->bus == SPI_BUS) ? FALSE : TRUE);
+					continue;
+				}
+			}
+			dhd_os_sdunlock_rxq(bus->dhd);
+
+			/* Now check the header */
+			bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+			/* Extract hardware header fields */
+			len = ltoh16_ua(bus->rxhdr);
+			check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+			/* All zeros means readahead info was bad */
+			if (!(len|check)) {
+				DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+				           __FUNCTION__));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate check bytes */
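+			/* The check halfword is the one's complement of the length, so
+			 * ~(len ^ check) is nonzero only on a corrupted header.
+			 */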
+			if ((uint16)~(len^check)) {
+				DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+				           " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+				           len, check));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rx_badhdr++;
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate frame length */
+			if (len < SDPCM_HDRLEN) {
+				DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+				           __FUNCTION__, len));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Check for consistency with readahead info */
+			len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+			if (len_consistent) {
+				/* Mismatch, force retry w/normal header (may be >4K) */
+				DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+				           "expected rxseq %d\n",
+				           __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+
+			/* Extract software header fields */
+			chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+			if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+				DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+				          " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+				          seq));
+				bus->nextlen = 0;
+			}
+
+			bus->dhd->rx_readahead_cnt++;
+			/* Handle Flow Control */
+			fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			delta = 0;
+			if (~bus->flowcontrol & fcbits) {
+				bus->fc_xoff++;
+				delta = 1;
+			}
+			if (bus->flowcontrol & ~fcbits) {
+				bus->fc_xon++;
+				delta = 1;
+			}
+
+			if (delta) {
+				bus->fc_rcvd++;
+				bus->flowcontrol = fcbits;
+			}
+
+			/* Check and update sequence number */
+			if (rxseq != seq) {
+				DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+			/* Check window for sanity */
+			if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+				DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+				           __FUNCTION__, txmax, bus->tx_seq));
+				txmax = bus->tx_seq + 2;
+			}
+			bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Data", rxbuf, len);
+			} else if (DHD_HDRS_ON()) {
+				prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+			}
+#endif
+
+			if (chan == SDPCM_CONTROL_CHANNEL) {
+				if (bus->bus == SPI_BUS) {
+					dhdsdio_read_control(bus, rxbuf, len, doff);
+					if (bus->usebufpool) {
+						dhd_os_sdlock_rxq(bus->dhd);
+						PKTFREE(bus->dhd->osh, pkt, FALSE);
+						dhd_os_sdunlock_rxq(bus->dhd);
+					}
+					continue;
+				} else {
+					DHD_ERROR(("%s (nextlen): readahead on control"
+					           " packet %d?\n", __FUNCTION__, seq));
+					/* Force retry w/normal header read */
+					bus->nextlen = 0;
+					dhdsdio_rxfail(bus, FALSE, TRUE);
+					dhd_os_sdlock_rxq(bus->dhd);
+					PKTFREE2();
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			}
+
+			if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+				DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+				           "rx pktbuf's or not yet malloced.\n", len, chan));
+				continue;
+			}
+
+			/* Validate data offset */
+			if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+				DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+				           __FUNCTION__, doff, len, SDPCM_HDRLEN));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				ASSERT(0);
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				continue;
+			}
+
+			/* All done with this one -- now deliver the packet */
+			goto deliver;
+		}
+		/* gSPI frames should not be handled in fractions */
+		if (bus->bus == SPI_BUS) {
+			break;
+		}
+
+		/* Read frame header (hardware and software) */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            bus->rxhdr, firstread, NULL, NULL, NULL);
+		bus->f2rxhdrs++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+			bus->rx_hdrfail++;
+			dhdsdio_rxfail(bus, TRUE, TRUE);
+			continue;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+			prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Extract hardware header fields */
+		len = ltoh16_ua(bus->rxhdr);
+		check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+		/* All zeros means no more frames */
+		if (!(len|check)) {
+			*finished = TRUE;
+			break;
+		}
+
+		/* Validate check bytes */
+		if ((uint16)~(len^check)) {
+			DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, len, check));
+			bus->rx_badhdr++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Validate frame length */
+		if (len < SDPCM_HDRLEN) {
+			DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+			continue;
+		}
+
+		/* Extract software header fields */
+		chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		/* Validate data offset */
+		if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+			DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+			           __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+			bus->rx_badhdr++;
+			ASSERT(0);
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Save the readahead length if there is one */
+		bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+
+		/* Handle Flow Control */
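+		/* Bits newly set in fcbits signal XOFF; bits newly cleared signal XON */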
+		fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		delta = 0;
+		if (~bus->flowcontrol & fcbits) {
+			bus->fc_xoff++;
+			delta = 1;
+		}
+		if (bus->flowcontrol & ~fcbits) {
+			bus->fc_xon++;
+			delta = 1;
+		}
+
+		if (delta) {
+			bus->fc_rcvd++;
+			bus->flowcontrol = fcbits;
+		}
+
+		/* Check and update sequence number */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_seq + 2;
+		}
+		bus->tx_max = txmax;
+
+		/* Call a separate function for control frames */
+		if (chan == SDPCM_CONTROL_CHANNEL) {
+			dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+			continue;
+		}
+
+		ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+		       (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+		/* Length to read */
+		rdlen = (len > firstread) ? (len - firstread) : 0;
+
+		/* May pad read to blocksize for efficiency */
+		if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+			pad = bus->blocksize - (rdlen % bus->blocksize);
+			if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+			    ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+				rdlen += pad;
+		} else if (rdlen % DHD_SDALIGN) {
+			rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+		}
+
+		/* Satisfy length-alignment requirements */
+		if (forcealign && (rdlen & (ALIGNMENT - 1)))
+			rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+		if ((rdlen + firstread) > MAX_RX_DATASZ) {
+			/* Too long -- skip this frame */
+			DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+			bus->dhd->rx_errors++; bus->rx_toolong++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+			/* Give up on data, request rtx of events */
+			DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+			           __FUNCTION__, rdlen, chan));
+			bus->dhd->rx_dropped++;
+			dhd_os_sdunlock_rxq(bus->dhd);
+			dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+			continue;
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		ASSERT(!PKTLINK(pkt));
+
+		/* Leave room for what we already read, and align remainder */
+		ASSERT(firstread < (PKTLEN(osh, pkt)));
+		PKTPULL(osh, pkt, firstread);
+		PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+		/* Read the remaining frame data */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+		bus->f2rxdata++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+			           ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+			            ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+			continue;
+		}
+
+		/* Copy the already-read portion */
+		PKTPUSH(osh, pkt, firstread);
+		bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			prhex("Rx Data", PKTDATA(osh, pkt), len);
+		}
+#endif
+
+deliver:
+		/* Save superframe descriptor and allocate packet frame */
+		if (chan == SDPCM_GLOM_CHANNEL) {
+			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+				DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+				          __FUNCTION__, len));
+#ifdef DHD_DEBUG
+				if (DHD_GLOM_ON()) {
+					prhex("Glom Data", PKTDATA(osh, pkt), len);
+				}
+#endif
+				PKTSETLEN(osh, pkt, len);
+				ASSERT(doff == SDPCM_HDRLEN);
+				PKTPULL(osh, pkt, SDPCM_HDRLEN);
+				bus->glomd = pkt;
+			} else {
+				DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+			}
+			continue;
+		}
+
+		/* Fill in packet len and prio, deliver upward */
+		PKTSETLEN(osh, pkt, len);
+		PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+		/* Test channel packets are processed separately */
+		if (chan == SDPCM_TEST_CHANNEL) {
+			dhdsdio_testrcv(bus, pkt, seq);
+			continue;
+		}
+#endif /* SDTEST */
+
+		if (PKTLEN(osh, pkt) == 0) {
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			continue;
+		} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) {
+			DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			continue;
+		}
+
+
+		/* Unlock during rx call */
+		dhd_os_sdunlock(bus->dhd);
+		dhd_rx_frame(bus->dhd, ifidx, pkt, 1);
+		dhd_os_sdlock(bus->dhd);
+	}
+	rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+	/* Message if we hit the limit */
+	if (!rxleft && !sdtest)
+		DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+	else
+#endif /* DHD_DEBUG */
+	DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+	/* Back off rxseq if awaiting rtx, update rx_seq */
+	if (bus->rxskip)
+		rxseq--;
+	bus->rx_seq = rxseq;
+
+	return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus = 0;
+	uint32 hmb_data;
+	uint8 fcbits;
+	uint retries = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Read mailbox data and ack that we did so */
+	R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+	if (retries <= retry_limit)
+		W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+	bus->f1regdata += 2;
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+		if (!bus->rxskip) {
+			DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+		}
+		bus->rxskip = FALSE;
+		intstatus |= I_HMB_FRAME_IND;
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+			           bus->sdpcm_ver, SDPCM_PROT_VERSION));
+		else
+			DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+	}
+
+	/*
+	 * Flow Control has been moved into the RX headers and this out of band
+	 * method isn't used any more.  Leave this here to possibly remain backward
+	 * compatible with older dongles.
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->fc_xoff++;
+		if (bus->flowcontrol & ~fcbits)
+			bus->fc_xon++;
+
+		bus->fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+	                 HMB_DATA_NAKHANDLED |
+	                 HMB_DATA_FC |
+	                 HMB_DATA_FWREADY |
+	                 HMB_DATA_FCDATA_MASK |
+	                 HMB_DATA_VERSION_MASK)) {
+		DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+	}
+
+	return intstatus;
+}
+
+bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus, newstatus = 0;
+	uint retries = 0;
+	uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+	uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+	uint framecnt = 0;		  /* Temporary counter of tx/rx frames */
+	bool rxdone = TRUE;		  /* Flag for no more read data */
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Start with leftover status bits */
+	intstatus = bus->intstatus;
+
+	dhd_os_sdlock(bus->dhd);
+
+	/* If waiting for HTAVAIL, check status */
+	if (bus->clkstate == CLK_PENDING) {
+		int err;
+		uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+		/* Check for inconsistent device control */
+		devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		} else {
+			ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+		}
+#endif /* DHD_DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+		if (SBSDIO_HTAV(clkctl)) {
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			if (err) {
+				DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			bus->clkstate = CLK_AVAIL;
+		} else {
+			goto clkwait;
+		}
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+	if (bus->clkstate == CLK_PENDING)
+		goto clkwait;
+
+	/* Pending interrupt indicates new device status */
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata++;
+		if (bcmsdh_regfail(bus->sdh))
+			newstatus = 0;
+		newstatus &= bus->hostintmask;
+		bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+		if (newstatus) {
+			W_SDREG(newstatus, &regs->intstatus, retries);
+			bus->f1regdata++;
+		}
+	}
+
+	/* Merge new bits with previous */
+	intstatus |= newstatus;
+	bus->intstatus = 0;
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata += 2;
+		bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus);
+	}
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		DHD_ERROR(("Dongle reports SBINT\n"));
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	if (intstatus & I_CHIPACTIVE) {
+		DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
+	if (bus->rxskip)
+		intstatus &= ~I_HMB_FRAME_IND;
+
+	/* On frame indication, read available frames */
+	if (PKT_AVAILABLE()) {
+		framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+		if (rxdone || bus->rxskip)
+			intstatus &= ~I_HMB_FRAME_IND;
+		rxlimit -= MIN(framecnt, rxlimit);
+	}
+
+	/* Keep still-pending events for next scheduling */
+	bus->intstatus = intstatus;
+
+clkwait:
+	/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+	 * or clock availability.  (Allows tx loop to check ipend if desired.)
+	 * (Unless register access seems hosed, as we may not be able to ACK...)
+	 */
+	if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+		DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+		          __FUNCTION__, rxdone, framecnt));
+		bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+		bcmsdh_intr_enable(sdh);
+	}
+
+	if (DATAOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+		int ret, i;
+
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                      (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+			NULL, NULL, NULL);
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+			          __FUNCTION__, ret));
+			bus->tx_sderrs++;
+
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+			                 SFC_WF_TERM, NULL);
+			bus->f1regdata++;
+
+			for (i = 0; i < 3; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+
+		}
+		if (ret == 0) {
+				bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+		}
+
+		printf("Return_dpc value is: %d\n", ret);
+		bus->ctrl_frame_stat = FALSE;
+		dhd_wait_event_wakeup(bus->dhd);
+	}
+	/* Send queued frames (limit 1 if rx may still be pending) */
+	else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+	    pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+		framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+		framecnt = dhdsdio_sendfromq(bus, framecnt);
+		txlimit -= framecnt;
+	}
+
+	/* Resched if events or tx frames are pending, else await next interrupt */
+	/* On failed register access, all bets are off: no resched or interrupts */
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+		DHD_ERROR(("%s: failed backplane access over SDIO, halting operation %d \n",
+		           __FUNCTION__, bcmsdh_regfail(sdh)));
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->intstatus = 0;
+	} else if (bus->clkstate == CLK_PENDING) {
+		DHD_INFO(("%s: rescheduled due to CLK_PENDING awaiting "
+		          "I_CHIPACTIVE interrupt\n", __FUNCTION__));
+		resched = TRUE;
+	} else if (bus->intstatus || bus->ipend ||
+	           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+			PKT_AVAILABLE()) {  /* Read multiple frames */
+		resched = TRUE;
+	}
+
+
+	bus->dpc_sched = resched;
+
+	/* If we're done for now, turn off clock request. */
+	if ((bus->clkstate != CLK_PENDING) && bus->idletime == DHD_IDLE_IMMEDIATE) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched;
+
+	/* Call the DPC directly. */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	resched = dhdsdio_dpc(bus);
+
+	return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus) {
+		DHD_ERROR(("%s: bus is a null pointer, exiting\n", __FUNCTION__));
+		return;
+	}
+	sdh = bus->sdh;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is down, nothing to do\n", __FUNCTION__));
+		return;
+	}
+	/* Count the interrupt call */
+	bus->intrcount++;
+	bus->ipend = TRUE;
+
+	/* Shouldn't get this interrupt if we're sleeping? */
+	if (bus->sleeping) {
+		DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+		return;
+	}
+
+	/* Disable additional interrupts (is this still needed?) */
+	if (bus->intr) {
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	} else {
+		DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+	}
+
+	bcmsdh_intr_disable(sdh);
+	bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
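+	/* In ISR-thread mode, run the DPC inline (holding a wake lock) until it
+	 * reports no more pending work; otherwise just schedule the DPC.
+	 */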
+	dhd_os_wake_lock(bus->dhd);
+	while (dhdsdio_dpc(bus));
+	dhd_os_wake_unlock(bus->dhd);
+#else
+	bus->dpc_sched = TRUE;
+	dhd_sched_dpc(bus->dhd);
+#endif /* SDIO_ISR_THREAD */
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+	/* Default to specified length, or full range */
+	if (dhd_pktgen_len) {
+		bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+		bus->pktgen_minlen = bus->pktgen_maxlen;
+	} else {
+		bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+		bus->pktgen_minlen = 0;
+	}
+	bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+	/* Default to per-watchdog burst with 10s print time */
+	bus->pktgen_freq = 1;
+	bus->pktgen_print = 10000 / dhd_watchdog_ms;
+	bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+	/* Default to echo mode */
+	bus->pktgen_mode = DHD_PKTGEN_ECHO;
+	bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+	void *pkt;
+	uint8 *data;
+	uint pktcount;
+	uint fillbyte;
+	osl_t *osh = bus->dhd->osh;
+	uint16 len;
+
+	/* Display current count if appropriate */
+	if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+		bus->pktgen_ptick = 0;
+		printf("%s: send attempts %d rcvd %d\n",
+		       __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd);
+	}
+
+	/* For recv mode, just make sure dongle has started sending */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (!bus->pktgen_rcvd)
+			dhdsdio_sdtest_set(bus, TRUE);
+		return;
+	}
+
+	/* Otherwise, generate or request the specified number of packets */
+	for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+		/* Stop if total has been reached */
+		if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			break;
+		}
+
+		/* Allocate an appropriate-sized packet */
+		len = bus->pktgen_len;
+		if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {
+			DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+			break;
+		}
+		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+		data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
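+		/* The test header written below is a command byte, an "extra" byte, and
+		 * a 16-bit little-endian length of the fill data that follows.
+		 */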
+		/* Write test header cmd and extra based on mode */
+		switch (bus->pktgen_mode) {
+		case DHD_PKTGEN_ECHO:
+			*data++ = SDPCM_TEST_ECHOREQ;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_SEND:
+			*data++ = SDPCM_TEST_DISCARD;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_RXBURST:
+			*data++ = SDPCM_TEST_BURST;
+			*data++ = (uint8)bus->pktgen_count;
+			break;
+
+		default:
+			DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+			PKTFREE(osh, pkt, TRUE);
+			bus->pktgen_count = 0;
+			return;
+		}
+
+		/* Write test header length field */
+		*data++ = (len >> 0);
+		*data++ = (len >> 8);
+
+		/* Then fill in the remainder -- N/A for burst, but who cares... */
+		for (fillbyte = 0; fillbyte < len; fillbyte++)
+			*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+			prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Send it */
+		if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) {
+			bus->pktgen_fail++;
+			if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+				bus->pktgen_count = 0;
+		}
+		bus->pktgen_sent++;
+
+		/* Bump length if not fixed, wrap at max */
+		if (++bus->pktgen_len > bus->pktgen_maxlen)
+			bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+		/* Special case for burst mode: just send one request! */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+			break;
+	}
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, bool start)
+{
+	void *pkt;
+	uint8 *data;
+	osl_t *osh = bus->dhd->osh;
+
+	/* Allocate the packet */
+	if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) {
+		DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+		return;
+	}
+	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+	data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+	/* Fill in the test header */
+	*data++ = SDPCM_TEST_SEND;
+	*data++ = start;
+	*data++ = (bus->pktgen_maxlen >> 0);
+	*data++ = (bus->pktgen_maxlen >> 8);
+
+	/* Send it */
+	if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE))
+		bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint8 *data;
+	uint pktlen;
+
+	uint8 cmd;
+	uint8 extra;
+	uint16 len;
+	uint16 offset;
+
+	/* Check for min length */
+	if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_testrcv: toss runt frame, pktlen %d\n", pktlen));
+		PKTFREE(osh, pkt, FALSE);
+		return;
+	}
+
+	/* Extract header fields */
+	data = PKTDATA(osh, pkt);
+	cmd = *data++;
+	extra = *data++;
+	len = *data++; len += *data++ << 8;
+
+	/* Check length for relevant commands */
+	if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+		if (pktlen != len + SDPCM_TEST_HDRLEN) {
+			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+			           " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+			PKTFREE(osh, pkt, FALSE);
+			return;
+		}
+	}
+
+	/* Process as per command */
+	switch (cmd) {
+	case SDPCM_TEST_ECHOREQ:
+		/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+		*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+		if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) {
+			bus->pktgen_sent++;
+		} else {
+			bus->pktgen_fail++;
+			PKTFREE(osh, pkt, FALSE);
+		}
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_ECHORSP:
+		if (bus->ext_loop) {
+			PKTFREE(osh, pkt, FALSE);
+			bus->pktgen_rcvd++;
+			break;
+		}
+
+		for (offset = 0; offset < len; offset++, data++) {
+			if (*data != SDPCM_TEST_FILL(offset, extra)) {
+				DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+				           "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+				           offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+				break;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_DISCARD:
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_BURST:
+	case SDPCM_TEST_SEND:
+	default:
+		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+		          " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+		PKTFREE(osh, pkt, FALSE);
+		break;
+	}
+
+	/* For recv mode, stop at limit (and tell dongle to stop sending) */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_total && (bus->pktgen_rcvd >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			dhdsdio_sdtest_set(bus, FALSE);
+		}
+	}
+}
+#endif /* SDTEST */
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus;
+
+	DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+	bus = dhdp->bus;
+
+	if (bus->dhd->dongle_reset)
+		return FALSE;
+
+	/* Ignore the timer if simulating bus down */
+	if (bus->sleeping)
+		return FALSE;
+
+	/* Poll period: check device if appropriate. */
+	if (bus->poll && (++bus->polltick >= bus->pollrate)) {
+		uint32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+			if (!bus->dpc_sched) {
+				uint8 devpend;
+				devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+				                          SDIOD_CCCR_INTPEND, NULL);
+				intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and schedule the DPC */
+			if (intstatus) {
+				bus->pollcnt++;
+				bus->ipend = TRUE;
+				if (bus->intr) {
+					bcmsdh_intr_disable(bus->sdh);
+				}
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->lastintrs = bus->intrcount;
+	}
+
+#ifdef DHD_DEBUG
+	/* Poll for console output periodically */
+	if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+			if (dhdsdio_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	/* Generate packets if configured */
+	if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+		/* Make sure backplane clock is on */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		bus->pktgen_tick = 0;
+		dhdsdio_pktgen(bus);
+	}
+#endif
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+		if (++bus->idlecount >= bus->idletime) {
+			bus->idlecount = 0;
+			if (bus->activity) {
+				bus->activity = FALSE;
+				dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+			}
+		}
+	}
+
+	return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	uint32 addr, val;
+	int rv;
+	void *pkt;
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Exclusive bus access */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	BUS_WAKE(bus);
+	/* No pend allowed since txpkt is called later, ht clk has to be on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Bump dongle by sending an empty event pkt.
+	 * sdpcm_sendup (RX) checks for virtual console input.
+	 */
+	if (((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) &&
+		bus->clkstate == CLK_AVAIL)
+		dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE);
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+	uint byte, tag, tdata;
+	DHD_INFO(("Function %d CIS:\n", fn));
+
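+	/* The CIS is a sequence of tuples: a tag byte, a link (length) byte, then
+	 * that many data bytes.  Tag 0xff terminates the CIS and tag 0x00 is a
+	 * NULL tuple with no link byte.
+	 */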
+	for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+		if ((byte % 16) == 0)
+			DHD_INFO(("    "));
+		DHD_INFO(("%02x ", cis[byte]));
+		if ((byte % 16) == 15)
+			DHD_INFO(("\n"));
+		if (!tdata--) {
+			tag = cis[byte];
+			if (tag == 0xff)
+				break;
+			else if (!tag)
+				tdata = 0;
+			else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+				tdata = cis[byte + 1] + 1;
+			else
+				DHD_INFO(("]"));
+		}
+	}
+	if ((byte % 16) != 15)
+		DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+	if (chipid == BCM4325_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4329_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4315_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4319_CHIP_ID)
+		return TRUE;
+	return FALSE;
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+	uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+	int ret;
+	dhd_bus_t *bus;
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_txbound = DHD_TXBOUND;
+	dhd_rxbound = DHD_RXBOUND;
+	dhd_alignctl = TRUE;
+	sd1idle = TRUE;
+	dhd_readahead = TRUE;
+	retrydata = FALSE;
+	dhd_doflow = FALSE;
+	dhd_dongle_memsize = 0;
+	dhd_txminmax = DHD_TXMINMAX;
+
+	forcealign = TRUE;
+
+
+	dhd_common_init();
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+	/* We make assumptions about address window mappings */
+	ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+	/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+	 * means early parse could fail, so here we should get either an ID
+	 * we recognize OR (-1) indicating we must request power first.
+	 */
+	/* Check the Vendor ID */
+	switch (venid) {
+		case 0x0000:
+		case VENDOR_BROADCOM:
+			break;
+		default:
+			DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+			           __FUNCTION__, venid));
+			return NULL;
+	}
+
+	/* Check the Device ID and make sure it's one that we support */
+	switch (devid) {
+		case BCM4325_D11DUAL_ID:		/* 4325 802.11a/g id */
+		case BCM4325_D11G_ID:			/* 4325 802.11g 2.4Ghz band id */
+		case BCM4325_D11A_ID:			/* 4325 802.11a 5Ghz band id */
+			DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4329_D11NDUAL_ID:		/* 4329 802.11n dualband device */
+		case BCM4329_D11N2G_ID:		/* 4329 802.11n 2.4G device */
+		case BCM4329_D11N5G_ID:		/* 4329 802.11n 5G device */
+		case 0x4329:
+			DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4315_D11DUAL_ID:		/* 4315 802.11a/g id */
+		case BCM4315_D11G_ID:			/* 4315 802.11g id */
+		case BCM4315_D11A_ID:			/* 4315 802.11a id */
+			DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4319_D11N_ID:			/* 4319 802.11n id */
+		case BCM4319_D11N2G_ID:			/* 4319 802.11n2g id */
+		case BCM4319_D11N5G_ID:			/* 4319 802.11n5g id */
+			DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+			break;
+		case 0:
+			DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+			          __FUNCTION__));
+			break;
+
+		default:
+			DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+			           __FUNCTION__, venid, devid));
+			return NULL;
+	}
+
+	if (osh == NULL) {
+		/* Ask the OS interface part for an OSL handle */
+		if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
+			DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
+			return NULL;
+		}
+	}
+
+	/* Allocate private bus interface state */
+	if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+		DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	bzero(bus, sizeof(dhd_bus_t));
+	bus->sdh = sdh;
+	bus->cl_devid = (uint16)devid;
+	bus->bus = DHD_BUS;
+	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+	bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+	/* attempt to attach to the dongle */
+	if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+		DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Attach to the dhd/OS/network interface */
+	if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Allocate buffers */
+	if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Register interrupt callback, but mask it (not operational yet). */
+	DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+	bcmsdh_intr_disable(sdh);
+	if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+		DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+		           __FUNCTION__, ret));
+		goto fail;
+	}
+	DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+
+	DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+
+	/* if firmware path present try to download and bring up bus */
+	if ((ret = dhd_bus_start(bus->dhd)) != 0) {
+#if 1
+		DHD_ERROR(("%s: failed\n", __FUNCTION__));
+		goto fail;
+#else
+		if (ret == BCME_NOTUP)  {
+			DHD_ERROR(("%s: dongle is not responding\n", __FUNCTION__));
+			goto fail;
+		}
+#endif
+	}
+	/* Ok, have the per-port tell the stack we're open for business */
+	if (dhd_net_attach(bus->dhd, 0) != 0) {
+		DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	return bus;
+
+fail:
+	dhdsdio_release(bus, osh);
+	return NULL;
+}
+
+
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+                    uint16 devid)
+{
+	uint8 clkctl = 0;
+	int err = 0;
+
+	bus->alp_only = TRUE;
+
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+		DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+	}
+
+#ifdef DHD_DEBUG
+	printf("F1 signature read @0x18000000=0x%4x\n",
+	       bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4));
+
+
+#endif /* DHD_DEBUG */
+
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+
+
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+	if (!err)
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+		DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+		           err, DHD_INIT_CLKCTL1, clkctl));
+		goto fail;
+	}
+
+
+#ifdef DHD_DEBUG
+	if (DHD_INFO_ON()) {
+		uint fn, numfn;
+		uint8 *cis[SDIOD_MAX_IOFUNCS];
+		int err = 0;
+
+		numfn = bcmsdh_query_iofnum(sdh);
+		ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+		/* Make sure ALP is available before trying to read CIS */
+		SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+		                                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+		          !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+		/* Now request ALP be put on the bus */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 DHD_INIT_CLKCTL2, &err);
+		OSL_DELAY(65);
+
+		for (fn = 0; fn <= numfn; fn++) {
+			if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+				break;
+			}
+			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+			if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+				MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+				break;
+			}
+			dhd_dump_cis(fn, cis[fn]);
+		}
+
+		while (fn-- > 0) {
+			ASSERT(cis[fn]);
+			MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+		}
+
+		if (err) {
+			DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+			goto fail;
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+	if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+		DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+		           __FUNCTION__, bus->sih->chip));
+		goto fail;
+	}
+
+	si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+	/* Get info on the ARM and SOCRAM cores... */
+	if (!DHD_NOPMU(bus)) {
+		if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			bus->armrev = si_corerev(bus->sih);
+		} else {
+			DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+			goto fail;
+		}
+		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+		bus->ramsize = bus->orig_ramsize;
+		if (dhd_dongle_memsize)
+			dhd_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+		DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
+			bus->ramsize, bus->orig_ramsize));
+	}
+
+	/* ...but normally deal with the SDPCMDEV core */
+	if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+	    !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+		DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->sdpcmrev = si_corerev(bus->sih);
+
+	/* Set core control so an SDIO reset does a backplane reset */
+	OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+
+	pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifndef DHD_USE_STATIC_BUF
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = MALLOC(osh, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = MALLOC(osh, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already allocated above */
+		if (bus->rxbuf)
+			MFREE(osh, bus->rxbuf, bus->rxblen);
+		goto fail;
+	}
+#else
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = dhd_os_prealloc(DHD_PREALLOC_RXBUF, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = dhd_os_prealloc(DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		goto fail;
+	}
+#endif /* DHD_USE_STATIC_BUF */
+
+	/* Align the buffer */
+	if ((uintptr)bus->databuf % DHD_SDALIGN)
+		bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+	else
+		bus->dataptr = bus->databuf;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	int32 fnum;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef SDTEST
+	dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	bus->sleeping = FALSE;
+	bus->rxflow = FALSE;
+	bus->prev_rxlim_hit = 0;
+
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = (int32)dhd_idletime;
+	bus->idleclock = DHD_IDLE_ACTIVE;
+
+	/* Query the SD clock speed */
+	if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+	                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+		bus->sd_divisor = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_divisor", bus->sd_divisor));
+	}
+
+	/* Query the SD bus mode */
+	if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+	                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+		bus->sd_mode = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_mode", bus->sd_mode));
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	fnum = 2;
+	if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+	                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+		bus->blocksize = 0;
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_blocksize", bus->blocksize));
+	}
+	bus->roundup = MIN(max_roundup, bus->blocksize);
+
+	/* Query if bus module supports packet chaining, default to use if supported */
+	if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+	                    &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+		bus->sd_rxchain = FALSE;
+	} else {
+		DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+		          __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+	}
+	bus->use_rxchain = (bool)bus->sd_rxchain;
+
+	return TRUE;
+}
+
+bool
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *fw_path, char *nv_path)
+{
+	bool ret;
+	bus->fw_path = fw_path;
+	bus->nv_path = nv_path;
+
+	ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+	return ret;
+}
+
+static bool
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	bool ret;
+
+	/* Download the firmware */
+	dhd_os_wake_lock(bus->dhd);
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	ret = _dhdsdio_download_firmware(bus) == 0;
+
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	dhd_os_wake_unlock(bus->dhd);
+	return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(osh);
+
+
+		/* De-register interrupt handler */
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_intr_dereg(bus->sdh);
+
+		if (bus->dhd) {
+
+			dhdsdio_release_dongle(bus, osh, TRUE);
+
+			dhd_detach(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		dhdsdio_release_malloc(bus, osh);
+
+
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+
+	if (osh)
+		dhd_osl_detach(osh);
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->rxbuf) {
+#ifndef DHD_USE_STATIC_BUF
+		MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+		bus->rxctl = bus->rxbuf = NULL;
+		bus->rxlen = 0;
+	}
+
+	if (bus->databuf) {
+#ifndef DHD_USE_STATIC_BUF
+		MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+		bus->databuf = NULL;
+	}
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, int reset_flag)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+		return;
+
+	if (bus->sih) {
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+#if !defined(BCMLXSDMMC)
+		si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		si_detach(bus->sih);
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(bus->dhd);
+		dhdsdio_release(bus, bus->dhd->osh);
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+	dhdsdio_probe,
+	dhdsdio_disconnect
+};
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_unregister();
+}
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+
+	DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+	/* Download image */
+	while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, dlarray + offset, MEMBLOCK);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+	if (offset < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			dlarray + offset, sizeof(dlarray) - offset);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+			goto err;
+		}
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		unsigned char *ularray;
+
+		ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
+			ASSERT(0);
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *fw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	uint len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, fw_path));
+
+	image = dhd_os_open_image(fw_path);
+	if (image == NULL)
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, len, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+/*
+ * process_nvram_vars: Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs.  End of buffer is marked by two NULs.
+*/
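+/*
+ * For example, a buffer holding
+ *     "# comment\r\nname1=value1\n\nname2=value2\n"
+ * would be rewritten in place as
+ *     "name1=value1\0name2=value2\0"
+ * followed by NUL padding, and the returned length would cover the bytes up
+ * to and including the NUL after the last assignment.
+ */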
+
+static uint
+process_nvram_vars(char *varbuf, uint len)
+{
+	char *dp;
+	bool findNewline;
+	int column;
+	uint buf_len, n;
+
+	dp = varbuf;
+
+	findNewline = FALSE;
+	column = 0;
+
+	for (n = 0; n < len; n++) {
+		if (varbuf[n] == 0)
+			break;
+		if (varbuf[n] == '\r')
+			continue;
+		if (findNewline && varbuf[n] != '\n')
+			continue;
+		findNewline = FALSE;
+		if (varbuf[n] == '#') {
+			findNewline = TRUE;
+			continue;
+		}
+		if (varbuf[n] == '\n') {
+			if (column == 0)
+				continue;
+			*dp++ = 0;
+			column = 0;
+			continue;
+		}
+		*dp++ = varbuf[n];
+		column++;
+	}
+	buf_len = dp - varbuf;
+
+	while (dp < varbuf + n)
+		*dp++ = 0;
+
+	return buf_len;
+}
+
+/*
+	EXAMPLE: nvram_array
+	nvram_array format:
+	name=value
+	Use a newline at the end of each assignment, and an empty string with a
+	newline at the end of the array.
+
+	For example:
+	unsigned char  nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
+	Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
+
+	Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+	bus->nvram_params = nvram_params;
+}
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *nv_path;
+	bool nvram_file_exists;
+
+	nv_path = bus->nv_path;
+
+	nvram_file_exists = ((nv_path != NULL) && (nv_path[0] != '\0'));
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(nv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MEMBLOCK);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MEMBLOCK, image);
+	}
+	else {
+		len = strlen(bus->nvram_params);
+		ASSERT(len <= MEMBLOCK);
+		if (len > MEMBLOCK)
+			len = MEMBLOCK;
+		memcpy(memblock, bus->nvram_params, len);
+	}
+
+	if (len > 0 && len < MEMBLOCK) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+		len = process_nvram_vars(bufp, len);
+		bufp += len;
+		*bufp++ = 0;
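+		/* process_nvram_vars() leaves the variables NUL-separated; the extra NUL
+		 * appended above yields the two-NUL end-of-buffer marker described in its
+		 * comment, hence the len + 1 passed below.
+		 */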
+		if (len)
+			bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_SDIO_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		return bcmerror;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdsdio_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdsdio_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+	/* External nvram takes precedence if specified */
+	if (dhdsdio_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+	}
+
+	/* Take arm out of reset */
+	if (dhdsdio_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	int status;
+
+	/* 4329: GSPI check */
+	status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+	return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle));
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+	return SDPCM_HDRLEN;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+
+	if (flag == TRUE) {
+		if (!bus->dhd->dongle_reset) {
+			dhd_os_sdlock(dhdp);
+			/* Turning off watchdog */
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop comes before ifconfig_down */
+			dhd_txflowcontrol(bus->dhd, 0, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			/* Expect app to have torn down any connection before calling */
+			/* Stop the bus, disable F2 */
+			dhd_bus_stop(bus, FALSE);
+#if defined(OOB_INTR_ONLY)
+			bcmsdh_set_irq(FALSE);
+#endif /* defined(OOB_INTR_ONLY) */
+			/* Clean tx/rx buffer pointers, detach from the dongle */
+			dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE);
+
+			bus->dhd->dongle_reset = TRUE;
+			bus->dhd->up = FALSE;
+			dhd_os_sdunlock(dhdp);
+
+			DHD_TRACE(("%s:  WLAN OFF DONE\n", __FUNCTION__));
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_SDIO_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+
+		DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+		if (bus->dhd->dongle_reset) {
+			/* Turn on WLAN */
+			dhd_os_sdlock(dhdp);
+
+			/* Reset SD client */
+			bcmsdh_reset(bus->sdh);
+
+			/* Attempt to re-attach & download */
+			if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+			                        (uint32 *)SI_ENUM_BASE,
+			                        bus->cl_devid)) {
+				/* Attempt to download binary to the dongle */
+				if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+					dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
+
+					/* Re-init bus, enable F2 transfer */
+					bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+					if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+						bcmsdh_set_irq(TRUE);
+						dhd_enable_oob_intr(bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+						bus->dhd->dongle_reset = FALSE;
+						bus->dhd->up = TRUE;
+#if !defined(IGNORE_ETH0_DOWN)
+						/* Restore flow control  */
+						dhd_txflowcontrol(bus->dhd, 0, OFF);
+#endif
+						/* Turning on watchdog back */
+						dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+						DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+					} else {
+						dhd_bus_stop(bus, FALSE);
+						dhdsdio_release_dongle(bus, bus->dhd->osh, FALSE);
+					}
+				} else
+					bcmerror = BCME_SDIO_ERROR;
+			} else
+				bcmerror = BCME_SDIO_ERROR;
+
+			dhd_os_sdunlock(dhdp);
+		} else {
+			DHD_ERROR(("%s: Set DEVRESET=FALSE invoked when device is on\n",
+				__FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+		}
+	}
+	return bcmerror;
+}
diff --git a/drivers/net/wireless/bcm4329/dngl_stats.h b/drivers/net/wireless/bcm4329/dngl_stats.h
new file mode 100644
index 0000000..e5db54e
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/dngl_stats.h
@@ -0,0 +1,43 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h,v 1.2.140.3 2008/05/26 16:52:08 Exp $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+	unsigned long	rx_packets;		/* total packets received */
+	unsigned long	tx_packets;		/* total packets transmitted */
+	unsigned long	rx_bytes;		/* total bytes received */
+	unsigned long	tx_bytes;		/* total bytes transmitted */
+	unsigned long	rx_errors;		/* bad packets received */
+	unsigned long	tx_errors;		/* packet transmit problems */
+	unsigned long	rx_dropped;		/* packets dropped by dongle */
+	unsigned long	tx_dropped;		/* packets dropped by dongle */
+	unsigned long   multicast;      /* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcm4329/hndpmu.c b/drivers/net/wireless/bcm4329/hndpmu.c
new file mode 100644
index 0000000..307347a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/hndpmu.c
@@ -0,0 +1,131 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c,v 1.95.2.17.4.11.2.63 2010/07/21 13:55:09 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+/* debug/trace */
+#define	PMU_ERROR(args)
+
+#define	PMU_MSG(args)
+
+
+/* SDIO Pad drive strength to select value mappings */
+typedef struct {
+	uint8 strength;			/* Pad Drive Strength in mA */
+	uint8 sel;			/* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+	{4, 0x2},
+	{2, 0x3},
+	{1, 0x0},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+	{12, 0x7},
+	{10, 0x6},
+	{8, 0x5},
+	{6, 0x4},
+	{4, 0x2},
+	{2, 0x1},
+	{0, 0x0} };
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))
+
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	chipcregs_t *cc;
+	uint origidx, intr_val = 0;
+	sdiod_drive_str_t *str_tab = NULL;
+	uint32 str_mask = 0;
+	uint32 str_shift = 0;
+
+	if (!(sih->cccaps & CC_CAP_PMU)) {
+		return;
+	}
+
+	/* Remember original core before switch to chipc */
+	cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+	switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+		str_mask = 0x30000000;
+		str_shift = 28;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+	case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+
+	default:
+		PMU_MSG(("No SDIO Drive strength init done for chip %x rev %d pmurev %d\n",
+		         sih->chip, sih->chiprev, sih->pmurev));
+
+		break;
+	}
+
+	if (str_tab != NULL) {
+		uint32 drivestrength_sel = 0;
+		uint32 cc_data_temp;
+		int i;
+
+		for (i = 0; str_tab[i].strength != 0; i ++) {
+			if (drivestrength >= str_tab[i].strength) {
+				drivestrength_sel = str_tab[i].sel;
+				break;
+			}
+		}
+
+		W_REG(osh, &cc->chipcontrol_addr, 1);
+		cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+		cc_data_temp &= ~str_mask;
+		drivestrength_sel <<= str_shift;
+		cc_data_temp |= drivestrength_sel;
+		W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+
+		PMU_MSG(("SDIO: %dmA drive strength selected, set to 0x%08x\n",
+		         drivestrength, cc_data_temp));
+	}
+
+	/* Return to original core */
+	si_restore_core(sih, origidx, intr_val);
+}
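+
+/* Editor's note: illustrative usage sketch, not part of the original file.
+ * example_set_sdio_drive() is a hypothetical caller asking for a 6mA pad
+ * drive.  The table walk above selects the largest supported strength not
+ * exceeding the request, so 6mA maps to sel 0x4 on PMU rev 2/3 parts and
+ * falls back to the 4mA entry (sel 0x2) on PMU rev 1 parts.
+ */
+#if 0
+static void
+example_set_sdio_drive(si_t *sih, osl_t *osh)
+{
+	/* No-op on chips without a PMU or without a drive strength table. */
+	si_sdiod_drive_strength_init(sih, osh, 6);
+}
+#endif /* 0 - usage sketch only */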
diff --git a/drivers/net/wireless/bcm4329/include/Makefile b/drivers/net/wireless/bcm4329/include/Makefile
new file mode 100644
index 0000000..439ead1
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/Makefile
@@ -0,0 +1,21 @@
+#
+# include/Makefile
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile,v 13.5 2005/02/17 19:11:31 Exp $
+#
+
+SRCBASE	= ..
+
+TARGETS	= epivers.h
+
+
+all release:
+	bash epivers.sh
+
+clean:
+	rm -rf ${TARGETS} *.prev
+
+
+.PHONY: all release clean
diff --git a/drivers/net/wireless/bcm4329/include/aidmp.h b/drivers/net/wireless/bcm4329/include/aidmp.h
new file mode 100644
index 0000000..a927e5d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/aidmp.h
@@ -0,0 +1,368 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h,v 13.2.10.1 2008/05/07 20:32:12 Exp $
+ */
+
+
+#ifndef	_AIDMP_H
+#define	_AIDMP_H
+
+
+#define	MFGID_ARM		0x43b
+#define	MFGID_BRCM		0x4bf
+#define	MFGID_MIPS		0x4a7
+
+
+#define	CC_SIM			0
+#define	CC_EROM			1
+#define	CC_CORESIGHT		9
+#define	CC_VERIF		0xb
+#define	CC_OPTIMO		0xd
+#define	CC_GEN			0xe
+#define	CC_PRIMECELL		0xf
+
+
+#define	ER_EROMENTRY		0x000
+#define	ER_REMAPCONTROL		0xe00
+#define	ER_REMAPSELECT		0xe04
+#define	ER_MASTERSELECT		0xe10
+#define	ER_ITCR			0xf00
+#define	ER_ITIP			0xf04
+
+
+#define	ER_TAG			0xe
+#define	ER_TAG1			0x6
+#define	ER_VALID		1
+#define	ER_CI			0
+#define	ER_MP			2
+#define	ER_ADD			4
+#define	ER_END			0xe
+#define	ER_BAD			0xffffffff
+
+
+#define	CIA_MFG_MASK		0xfff00000
+#define	CIA_MFG_SHIFT		20
+#define	CIA_CID_MASK		0x000fff00
+#define	CIA_CID_SHIFT		8
+#define	CIA_CCL_MASK		0x000000f0
+#define	CIA_CCL_SHIFT		4
+
+
+#define	CIB_REV_MASK		0xff000000
+#define	CIB_REV_SHIFT		24
+#define	CIB_NSW_MASK		0x00f80000
+#define	CIB_NSW_SHIFT		19
+#define	CIB_NMW_MASK		0x0007c000
+#define	CIB_NMW_SHIFT		14
+#define	CIB_NSP_MASK		0x00003e00
+#define	CIB_NSP_SHIFT		9
+#define	CIB_NMP_MASK		0x000001f0
+#define	CIB_NMP_SHIFT		4
+
+
+#define	MPD_MUI_MASK		0x0000ff00
+#define	MPD_MUI_SHIFT		8
+#define	MPD_MP_MASK		0x000000f0
+#define	MPD_MP_SHIFT		4
+
+
+#define	AD_ADDR_MASK		0xfffff000
+#define	AD_SP_MASK		0x00000f00
+#define	AD_SP_SHIFT		8
+#define	AD_ST_MASK		0x000000c0
+#define	AD_ST_SHIFT		6
+#define	AD_ST_SLAVE		0x00000000
+#define	AD_ST_BRIDGE		0x00000040
+#define	AD_ST_SWRAP		0x00000080
+#define	AD_ST_MWRAP		0x000000c0
+#define	AD_SZ_MASK		0x00000030
+#define	AD_SZ_SHIFT		4
+#define	AD_SZ_4K		0x00000000
+#define	AD_SZ_8K		0x00000010
+#define	AD_SZ_16K		0x00000020
+#define	AD_SZ_SZD		0x00000030
+#define	AD_AG32			0x00000008
+#define	AD_ADDR_ALIGN		0x00000fff
+#define	AD_SZ_BASE		0x00001000	
+
+
+#define	SD_SZ_MASK		0xfffff000
+#define	SD_SG32			0x00000008
+#define	SD_SZ_ALIGN		0x00000fff
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _aidmp {
+	uint32	oobselina30;	
+	uint32	oobselina74;	
+	uint32	PAD[6];
+	uint32	oobselinb30;	
+	uint32	oobselinb74;	
+	uint32	PAD[6];
+	uint32	oobselinc30;	
+	uint32	oobselinc74;	
+	uint32	PAD[6];
+	uint32	oobselind30;	
+	uint32	oobselind74;	
+	uint32	PAD[38];
+	uint32	oobselouta30;	
+	uint32	oobselouta74;	
+	uint32	PAD[6];
+	uint32	oobseloutb30;	
+	uint32	oobseloutb74;	
+	uint32	PAD[6];
+	uint32	oobseloutc30;	
+	uint32	oobseloutc74;	
+	uint32	PAD[6];
+	uint32	oobseloutd30;	
+	uint32	oobseloutd74;	
+	uint32	PAD[38];
+	uint32	oobsynca;	
+	uint32	oobseloutaen;	
+	uint32	PAD[6];
+	uint32	oobsyncb;	
+	uint32	oobseloutben;	
+	uint32	PAD[6];
+	uint32	oobsyncc;	
+	uint32	oobseloutcen;	
+	uint32	PAD[6];
+	uint32	oobsyncd;	
+	uint32	oobseloutden;	
+	uint32	PAD[38];
+	uint32	oobaextwidth;	
+	uint32	oobainwidth;	
+	uint32	oobaoutwidth;	
+	uint32	PAD[5];
+	uint32	oobbextwidth;	
+	uint32	oobbinwidth;	
+	uint32	oobboutwidth;	
+	uint32	PAD[5];
+	uint32	oobcextwidth;	
+	uint32	oobcinwidth;	
+	uint32	oobcoutwidth;	
+	uint32	PAD[5];
+	uint32	oobdextwidth;	
+	uint32	oobdinwidth;	
+	uint32	oobdoutwidth;	
+	uint32	PAD[37];
+	uint32	ioctrlset;	
+	uint32	ioctrlclear;	
+	uint32	ioctrl;		
+	uint32	PAD[61];
+	uint32	iostatus;	
+	uint32	PAD[127];
+	uint32	ioctrlwidth;	
+	uint32	iostatuswidth;	
+	uint32	PAD[62];
+	uint32	resetctrl;	
+	uint32	resetstatus;	
+	uint32	resetreadid;	
+	uint32	resetwriteid;	
+	uint32	PAD[60];
+	uint32	errlogctrl;	
+	uint32	errlogdone;	
+	uint32	errlogstatus;	
+	uint32	errlogaddrlo;	
+	uint32	errlogaddrhi;	
+	uint32	errlogid;	
+	uint32	errloguser;	
+	uint32	errlogflags;	
+	uint32	PAD[56];
+	uint32	intstatus;	
+	uint32	PAD[127];
+	uint32	config;		
+	uint32	PAD[63];
+	uint32	itcr;		
+	uint32	PAD[3];
+	uint32	itipooba;	
+	uint32	itipoobb;	
+	uint32	itipoobc;	
+	uint32	itipoobd;	
+	uint32	PAD[4];
+	uint32	itipoobaout;	
+	uint32	itipoobbout;	
+	uint32	itipoobcout;	
+	uint32	itipoobdout;	
+	uint32	PAD[4];
+	uint32	itopooba;	
+	uint32	itopoobb;	
+	uint32	itopoobc;	
+	uint32	itopoobd;	
+	uint32	PAD[4];
+	uint32	itopoobain;	
+	uint32	itopoobbin;	
+	uint32	itopoobcin;	
+	uint32	itopoobdin;	
+	uint32	PAD[4];
+	uint32	itopreset;	
+	uint32	PAD[15];
+	uint32	peripherialid4;	
+	uint32	peripherialid5;	
+	uint32	peripherialid6;	
+	uint32	peripherialid7;	
+	uint32	peripherialid0;	
+	uint32	peripherialid1;	
+	uint32	peripherialid2;	
+	uint32	peripherialid3;	
+	uint32	componentid0;	
+	uint32	componentid1;	
+	uint32	componentid2;	
+	uint32	componentid3;	
+} aidmp_t;
+
+#endif 
+
+
+#define	OOB_BUSCONFIG		0x020
+#define	OOB_STATUSA		0x100
+#define	OOB_STATUSB		0x104
+#define	OOB_STATUSC		0x108
+#define	OOB_STATUSD		0x10c
+#define	OOB_ENABLEA0		0x200
+#define	OOB_ENABLEA1		0x204
+#define	OOB_ENABLEA2		0x208
+#define	OOB_ENABLEA3		0x20c
+#define	OOB_ENABLEB0		0x280
+#define	OOB_ENABLEB1		0x284
+#define	OOB_ENABLEB2		0x288
+#define	OOB_ENABLEB3		0x28c
+#define	OOB_ENABLEC0		0x300
+#define	OOB_ENABLEC1		0x304
+#define	OOB_ENABLEC2		0x308
+#define	OOB_ENABLEC3		0x30c
+#define	OOB_ENABLED0		0x380
+#define	OOB_ENABLED1		0x384
+#define	OOB_ENABLED2		0x388
+#define	OOB_ENABLED3		0x38c
+#define	OOB_ITCR		0xf00
+#define	OOB_ITIPOOBA		0xf10
+#define	OOB_ITIPOOBB		0xf14
+#define	OOB_ITIPOOBC		0xf18
+#define	OOB_ITIPOOBD		0xf1c
+#define	OOB_ITOPOOBA		0xf30
+#define	OOB_ITOPOOBB		0xf34
+#define	OOB_ITOPOOBC		0xf38
+#define	OOB_ITOPOOBD		0xf3c
+
+
+#define	AI_OOBSELINA30		0x000
+#define	AI_OOBSELINA74		0x004
+#define	AI_OOBSELINB30		0x020
+#define	AI_OOBSELINB74		0x024
+#define	AI_OOBSELINC30		0x040
+#define	AI_OOBSELINC74		0x044
+#define	AI_OOBSELIND30		0x060
+#define	AI_OOBSELIND74		0x064
+#define	AI_OOBSELOUTA30		0x100
+#define	AI_OOBSELOUTA74		0x104
+#define	AI_OOBSELOUTB30		0x120
+#define	AI_OOBSELOUTB74		0x124
+#define	AI_OOBSELOUTC30		0x140
+#define	AI_OOBSELOUTC74		0x144
+#define	AI_OOBSELOUTD30		0x160
+#define	AI_OOBSELOUTD74		0x164
+#define	AI_OOBSYNCA		0x200
+#define	AI_OOBSELOUTAEN		0x204
+#define	AI_OOBSYNCB		0x220
+#define	AI_OOBSELOUTBEN		0x224
+#define	AI_OOBSYNCC		0x240
+#define	AI_OOBSELOUTCEN		0x244
+#define	AI_OOBSYNCD		0x260
+#define	AI_OOBSELOUTDEN		0x264
+#define	AI_OOBAEXTWIDTH		0x300
+#define	AI_OOBAINWIDTH		0x304
+#define	AI_OOBAOUTWIDTH		0x308
+#define	AI_OOBBEXTWIDTH		0x320
+#define	AI_OOBBINWIDTH		0x324
+#define	AI_OOBBOUTWIDTH		0x328
+#define	AI_OOBCEXTWIDTH		0x340
+#define	AI_OOBCINWIDTH		0x344
+#define	AI_OOBCOUTWIDTH		0x348
+#define	AI_OOBDEXTWIDTH		0x360
+#define	AI_OOBDINWIDTH		0x364
+#define	AI_OOBDOUTWIDTH		0x368
+#define	AI_IOCTRLSET		0x400
+#define	AI_IOCTRLCLEAR		0x404
+#define	AI_IOCTRL		0x408
+#define	AI_IOSTATUS		0x500
+#define	AI_IOCTRLWIDTH		0x700
+#define	AI_IOSTATUSWIDTH	0x704
+#define	AI_RESETCTRL		0x800
+#define	AI_RESETSTATUS		0x804
+#define	AI_RESETREADID		0x808
+#define	AI_RESETWRITEID		0x80c
+#define	AI_ERRLOGCTRL		0xa00
+#define	AI_ERRLOGDONE		0xa04
+#define	AI_ERRLOGSTATUS		0xa08
+#define	AI_ERRLOGADDRLO		0xa0c
+#define	AI_ERRLOGADDRHI		0xa10
+#define	AI_ERRLOGID		0xa14
+#define	AI_ERRLOGUSER		0xa18
+#define	AI_ERRLOGFLAGS		0xa1c
+#define	AI_INTSTATUS		0xa00
+#define	AI_CONFIG		0xe00
+#define	AI_ITCR			0xf00
+#define	AI_ITIPOOBA		0xf10
+#define	AI_ITIPOOBB		0xf14
+#define	AI_ITIPOOBC		0xf18
+#define	AI_ITIPOOBD		0xf1c
+#define	AI_ITIPOOBAOUT		0xf30
+#define	AI_ITIPOOBBOUT		0xf34
+#define	AI_ITIPOOBCOUT		0xf38
+#define	AI_ITIPOOBDOUT		0xf3c
+#define	AI_ITOPOOBA		0xf50
+#define	AI_ITOPOOBB		0xf54
+#define	AI_ITOPOOBC		0xf58
+#define	AI_ITOPOOBD		0xf5c
+#define	AI_ITOPOOBAIN		0xf70
+#define	AI_ITOPOOBBIN		0xf74
+#define	AI_ITOPOOBCIN		0xf78
+#define	AI_ITOPOOBDIN		0xf7c
+#define	AI_ITOPRESET		0xf90
+#define	AI_PERIPHERIALID4	0xfd0
+#define	AI_PERIPHERIALID5	0xfd4
+#define	AI_PERIPHERIALID6	0xfd8
+#define	AI_PERIPHERIALID7	0xfdc
+#define	AI_PERIPHERIALID0	0xfe0
+#define	AI_PERIPHERIALID1	0xfe4
+#define	AI_PERIPHERIALID2	0xfe8
+#define	AI_PERIPHERIALID3	0xfec
+#define	AI_COMPONENTID0		0xff0
+#define	AI_COMPONENTID1		0xff4
+#define	AI_COMPONENTID2		0xff8
+#define	AI_COMPONENTID3		0xffc
+
+
+#define	AIRC_RESET		1
+
+
+#define	AICFG_OOB		0x00000020
+#define	AICFG_IOS		0x00000010
+#define	AICFG_IOC		0x00000008
+#define	AICFG_TO		0x00000004
+#define	AICFG_ERRL		0x00000002
+#define	AICFG_RST		0x00000001
+
+#endif	
diff --git a/drivers/net/wireless/bcm4329/include/bcmcdc.h b/drivers/net/wireless/bcm4329/include/bcmcdc.h
new file mode 100644
index 0000000..c2a860b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmcdc.h
@@ -0,0 +1,100 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h,v 13.14.16.3.16.4 2009/04/12 16:58:45 Exp $
+ */
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+	uint32 cmd;      /* ioctl command value */
+	uint32 len;      /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+	uint32 flags;    /* flag defns given below */
+	uint32 status;   /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK   0x0000FFFF  /* maximum or expected response length, */
+					   /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT  0
+#define CDCL_IOC_INLEN_MASK    0xFFFF0000   /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT   16
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR		0x01	/* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET		0x02	/* 0=get, 1=set cmd */
+#define CDCF_IOC_IF_MASK	0xF000	/* I/F index */
+#define CDCF_IOC_IF_SHIFT	12
+#define CDCF_IOC_ID_MASK	0xFFFF0000	/* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT	16		/* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags)	(((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags)	(((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+/*
+ * BDC header
+ *
+ *   The BDC header is used on data packets to convey priority across USB.
+ */
+
+#define	BDC_HEADER_LEN		4
+
+#define BDC_PROTO_VER		1	/* Protocol version */
+
+#define BDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
+
+#define BDC_FLAG__UNUSED	0x03	/* Unassigned */
+#define BDC_FLAG_SUM_GOOD	0x04	/* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums */
+
+#define BDC_PRIORITY_MASK	0x7
+
+#define BDC_FLAG2_FC_FLAG	0x10	/* flag to indicate if pkt contains */
+									/* FLOW CONTROL info only */
+#define BDC_PRIORITY_FC_SHIFT	4		/* flow control info shift */
+
+#define BDC_FLAG2_IF_MASK	0x0f	/* APSTA: interface on which the packet was received */
+#define BDC_FLAG2_IF_SHIFT	0
+
+#define BDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+struct bdc_header {
+	uint8	flags;			/* Flags */
+	uint8	priority;	/* 802.1d Priority 0:2 bits, 4:7 flow control info for usb */
+	uint8	flags2;
+	uint8	rssi;
+};
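+
+/* Editor's note: illustrative sketch, not part of the original header.  It
+ * shows how the CDC flag macros compose a "set" request header for
+ * interface 0; example_fill_cdc_hdr(), cmd, outlen and id are hypothetical
+ * names, and any byte-order conversion is left to the caller.
+ */
+#if 0
+static void
+example_fill_cdc_hdr(cdc_ioctl_t *msg, uint32 cmd, uint32 outlen, uint32 id)
+{
+	msg->cmd = cmd;
+	/* lower 16 bits carry the expected response length (header excluded) */
+	msg->len = (outlen << CDCL_IOC_OUTLEN_SHIFT) & CDCL_IOC_OUTLEN_MASK;
+	/* mark the request as a "set", tag it with an id and interface 0 */
+	msg->flags = CDCF_IOC_SET |
+	             ((id << CDCF_IOC_ID_SHIFT) & CDCF_IOC_ID_MASK) |
+	             ((0 << CDCF_IOC_IF_SHIFT) & CDCF_IOC_IF_MASK);
+	msg->status = 0;
+}
+#endif /* 0 - usage sketch only */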
diff --git a/drivers/net/wireless/bcm4329/include/bcmdefs.h b/drivers/net/wireless/bcm4329/include/bcmdefs.h
new file mode 100644
index 0000000..f4e9946
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmdefs.h
@@ -0,0 +1,114 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmdefs.h,v 13.38.4.10.2.7.6.11 2010/02/01 05:51:55 Exp $
+ */
+
+
+#ifndef	_bcmdefs_h_
+#define	_bcmdefs_h_
+
+#define STATIC	static
+
+#define	SI_BUS			0	
+#define	PCI_BUS			1	
+#define	PCMCIA_BUS		2	
+#define SDIO_BUS		3	
+#define JTAG_BUS		4	
+#define USB_BUS			5	
+#define SPI_BUS			6	
+
+
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) 	(BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) 	(bus)
+#endif
+
+
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) 	(BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) 	(bus)
+#endif
+
+
+
+#if defined(BCMSPROMBUS)
+#define SPROMBUS	(BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS	(PCMCIA_BUS)
+#else
+#define SPROMBUS	(PCI_BUS)
+#endif
+
+
+#ifdef BCMCHIPID
+#define CHIPID(chip)	(BCMCHIPID)
+#else
+#define CHIPID(chip)	(chip)
+#endif
+
+
+#define DMADDR_MASK_32 0x0		
+#define DMADDR_MASK_30 0xc0000000	
+#define DMADDR_MASK_0  0xffffffff	
+
+#define	DMADDRWIDTH_30  30 
+#define	DMADDRWIDTH_32  32 
+#define	DMADDRWIDTH_63  63 
+#define	DMADDRWIDTH_64  64 
+
+
+#define BCMEXTRAHDROOM 164
+
+
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD	(BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+
+#define BITFIELD_MASK(width) \
+		(((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+		(((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+		(((val) & (~(field ## _M << field ## _S))) | \
+		 ((unsigned)(bits) << field ## _S))
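+
+/* Editor's note: illustrative sketch.  GFIELD/SFIELD expect, for a field
+ * FOO, companion macros FOO_S (bit shift) and FOO_M (unshifted mask); the
+ * EXAMPLE_SPEED_* names below are hypothetical.
+ */
+#if 0
+#define EXAMPLE_SPEED_S		4			/* field occupies bits 6..4 */
+#define EXAMPLE_SPEED_M		BITFIELD_MASK(3)
+/* GFIELD(0x50, EXAMPLE_SPEED) == 0x5, SFIELD(0, EXAMPLE_SPEED, 0x3) == 0x30 */
+#endif /* 0 - usage sketch only */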
+
+
+#ifdef BCMSMALL
+#undef	BCMSPACE
+#define bcmspace	FALSE	
+#else
+#define	BCMSPACE
+#define bcmspace	TRUE	
+#endif
+
+
+#define	MAXSZ_NVRAM_VARS	4096
+
+#define LOCATOR_EXTERN static
+
+#endif 
diff --git a/drivers/net/wireless/bcm4329/include/bcmdevs.h b/drivers/net/wireless/bcm4329/include/bcmdevs.h
new file mode 100644
index 0000000..14853f1
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmdevs.h
@@ -0,0 +1,124 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h,v 13.172.4.5.4.10.2.36 2010/05/25 08:33:44 Exp $
+ */
+
+
+#ifndef	_BCMDEVS_H
+#define	_BCMDEVS_H
+
+
+#define	VENDOR_EPIGRAM		0xfeda
+#define	VENDOR_BROADCOM		0x14e4
+#define VENDOR_SI_IMAGE		0x1095		
+#define VENDOR_TI		0x104c		
+#define VENDOR_RICOH		0x1180		
+#define VENDOR_JMICRON		0x197b
+
+
+#define	VENDOR_BROADCOM_PCMCIA	0x02d0
+
+
+#define	VENDOR_BROADCOM_SDIO	0x00BF
+
+
+#define BCM_DNGL_VID            0xa5c
+#define BCM_DNGL_BL_PID_4320    0xbd11
+#define BCM_DNGL_BL_PID_4328    0xbd12
+#define BCM_DNGL_BL_PID_4322    0xbd13
+#define BCM_DNGL_BL_PID_4325    0xbd14
+#define BCM_DNGL_BL_PID_4315    0xbd15
+#define BCM_DNGL_BL_PID_4319    0xbd16
+#define BCM_DNGL_BDC_PID        0xbdc
+
+#define	BCM4325_D11DUAL_ID	0x431b		
+#define	BCM4325_D11G_ID		0x431c		
+#define	BCM4325_D11A_ID		0x431d		
+#define BCM4329_D11NDUAL_ID	0x432e		
+#define BCM4329_D11N2G_ID	0x432f		
+#define BCM4329_D11N5G_ID	0x4330		
+#define BCM4336_D11N_ID		0x4343		
+#define	BCM4315_D11DUAL_ID	0x4334		
+#define	BCM4315_D11G_ID		0x4335		
+#define	BCM4315_D11A_ID		0x4336		
+#define BCM4319_D11N_ID		0x4337		
+#define BCM4319_D11N2G_ID	0x4338		
+#define BCM4319_D11N5G_ID	0x4339		
+
+
+#define SDIOH_FPGA_ID		0x43f2		
+#define SPIH_FPGA_ID		0x43f5		
+#define	BCM4710_DEVICE_ID	0x4710		
+#define BCM27XX_SDIOH_ID	0x2702		
+#define PCIXX21_FLASHMEDIA0_ID	0x8033		
+#define PCIXX21_SDIOH0_ID	0x8034		
+#define PCIXX21_FLASHMEDIA_ID	0x803b		
+#define PCIXX21_SDIOH_ID	0x803c		
+#define R5C822_SDIOH_ID		0x0822		
+#define JMICRON_SDIOH_ID	0x2381		
+
+
+#define	BCM4306_CHIP_ID		0x4306		
+#define	BCM4311_CHIP_ID		0x4311		
+#define	BCM4312_CHIP_ID		0x4312		
+#define	BCM4315_CHIP_ID		0x4315		
+#define	BCM4318_CHIP_ID		0x4318		
+#define	BCM4319_CHIP_ID		0x4319		
+#define	BCM4320_CHIP_ID		0x4320		
+#define	BCM4321_CHIP_ID		0x4321		
+#define	BCM4322_CHIP_ID		0x4322		
+#define	BCM4325_CHIP_ID		0x4325		
+#define	BCM4328_CHIP_ID		0x4328		
+#define	BCM4329_CHIP_ID		0x4329		
+#define BCM4336_CHIP_ID		0x4336		
+#define	BCM4402_CHIP_ID		0x4402		
+#define	BCM4704_CHIP_ID		0x4704		
+#define	BCM4710_CHIP_ID		0x4710		
+#define	BCM4712_CHIP_ID		0x4712		
+#define BCM4785_CHIP_ID		0x4785		
+#define	BCM5350_CHIP_ID		0x5350		
+#define	BCM5352_CHIP_ID		0x5352		
+#define	BCM5354_CHIP_ID		0x5354		
+#define BCM5365_CHIP_ID		0x5365          
+
+
+
+#define	BCM4303_PKG_ID		2		
+#define	BCM4309_PKG_ID		1		
+#define	BCM4712LARGE_PKG_ID	0		
+#define	BCM4712SMALL_PKG_ID	1		
+#define	BCM4712MID_PKG_ID	2		
+#define BCM4328USBD11G_PKG_ID	2		
+#define BCM4328USBDUAL_PKG_ID	3		
+#define BCM4328SDIOD11G_PKG_ID	4		
+#define BCM4328SDIODUAL_PKG_ID	5		
+#define BCM4329_289PIN_PKG_ID	0		
+#define BCM4329_182PIN_PKG_ID	1		
+#define BCM5354E_PKG_ID		1		
+#define HDLSIM5350_PKG_ID	1		
+#define HDLSIM_PKG_ID		14		
+#define HWSIM_PKG_ID		15		
+
+
+#endif 
diff --git a/drivers/net/wireless/bcm4329/include/bcmendian.h b/drivers/net/wireless/bcm4329/include/bcmendian.h
new file mode 100644
index 0000000..ae46838
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmendian.h
@@ -0,0 +1,205 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *  $Id: bcmendian.h,v 1.31.302.1.16.1 2009/02/03 18:34:31 Exp $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+
+#define BCMSWAP16(val) \
+	((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+		  (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+
+#define BCMSWAP32(val) \
+	((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+		  (((uint32)(val) & (uint32)0x0000ff00U) <<  8) | \
+		  (((uint32)(val) & (uint32)0x00ff0000U) >>  8) | \
+		  (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+
+#define BCMSWAP32BY16(val) \
+	((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+		  (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+	return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+	return BCMSWAP32(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+	return BCMSWAP32BY16(val);
+}
+
+
+
+
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+	len = len / 2;
+
+	while (len--) {
+		*buf = bcmswap16(*buf);
+		buf++;
+	}
+}
+
+#ifndef hton16
+#ifndef IL_BIGENDIAN
+#define HTON16(i) BCMSWAP16(i)
+#define HTON32(i) BCMSWAP32(i)
+#define	hton16(i) bcmswap16(i)
+#define	hton32(i) bcmswap32(i)
+#define	ntoh16(i) bcmswap16(i)
+#define	ntoh32(i) bcmswap32(i)
+#define HTOL16(i) (i)
+#define HTOL32(i) (i)
+#define ltoh16(i) (i)
+#define ltoh32(i) (i)
+#define htol16(i) (i)
+#define htol32(i) (i)
+#else
+#define HTON16(i) (i)
+#define HTON32(i) (i)
+#define	hton16(i) (i)
+#define	hton32(i) (i)
+#define	ntoh16(i) (i)
+#define	ntoh32(i) (i)
+#define HTOL16(i) BCMSWAP16(i)
+#define HTOL32(i) BCMSWAP32(i)
+#define	ltoh16(i) bcmswap16(i)
+#define	ltoh32(i) bcmswap32(i)
+#define htol16(i) bcmswap16(i)
+#define htol32(i) bcmswap32(i)
+#endif 
+#endif 
+
+#ifndef IL_BIGENDIAN
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+#else
+#define ltoh16_buf(buf, i) bcmswap16_buf((uint16 *)buf, i)
+#define htol16_buf(buf, i) bcmswap16_buf((uint16 *)buf, i)
+#endif 
+
+
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = val >> 8;
+}
+
+
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = (val >> 8) & 0xff;
+	bytes[2] = (val >> 16) & 0xff;
+	bytes[3] = val >> 24;
+}
+
+
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val >> 8;
+	bytes[1] = val & 0xff;
+}
+
+
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val >> 24;
+	bytes[1] = (val >> 16) & 0xff;
+	bytes[2] = (val >> 8) & 0xff;
+	bytes[3] = val & 0xff;
+}
+
+#define _LTOH16_UA(cp)	((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp)	((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp)	(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp)	(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+	return _LTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+	return _LTOH32_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+	return _NTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+	return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#define ltoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)ptr : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)ptr) : \
+	 0xfeedf00d)
+
+#define ntoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)ptr : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)ptr) : \
+	 0xfeedf00d)
+
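+/* Editor's note: illustrative sketch, not part of the original header.
+ * example_check_ua() is a hypothetical self-test showing an unaligned
+ * little-endian store/load round trip; it works regardless of host
+ * endianness.
+ */
+#if 0
+static bool
+example_check_ua(void)
+{
+	uint8 buf[4];
+	uint32 val = 0x11223344;
+
+	htol32_ua_store(val, buf);		/* buf = 44 33 22 11 */
+	return (ltoh32_ua(buf) == val) && (ntoh32_ua(buf) == 0x44332211);
+}
+#endif /* 0 - usage sketch only */
+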
+#endif 
diff --git a/drivers/net/wireless/bcm4329/include/bcmpcispi.h b/drivers/net/wireless/bcm4329/include/bcmpcispi.h
new file mode 100644
index 0000000..7d98fb7
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmpcispi.h
@@ -0,0 +1,205 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.h,v 13.11.8.3 2008/07/09 21:23:29 Exp $
+ */
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/*
++---------------------------------------------------------------------------+
+|                                                                           |
+|                     7     6     5     4     3     2     1       0         |
+| 0x0000  SPI_CTRL    SPIE  SPE   0     MSTR  CPOL  CPHA  SPR1    SPR0      |
+| 0x0004  SPI_STAT    SPIF  WCOL  ST1   ST0   WFFUL WFEMP RFFUL   RFEMP     |
+| 0x0008  SPI_DATA    Bits 31:0, data to send out on MOSI                   |
+| 0x000C  SPI_EXT     ICNT1 ICNT0 BSWAP *HSMODE           ESPR1   ESPR0     |
+| 0x0020  GPIO_OE     0=input, 1=output                   PWR_OE  CS_OE     |
+| 0x0024  GPIO_DATA   CARD:1=missing, 0=present     CARD  PWR_DAT CS_DAT    |
+| 0x0040  INT_EDGE    0=level, 1=edge                     DEV_E   SPI_E     |
+| 0x0044  INT_POL     1=active high, 0=active low         DEV_P   SPI_P     |
+| 0x0048  INTMASK                                         DEV     SPI       |
+| 0x004C  INTSTATUS                                       DEV     SPI       |
+| 0x0060  HEXDISP     Reset value: 0x14e443f5.  In hexdisp mode, value      |
+|                     shows on the Raggedstone1 4-digit 7-segment display.  |
+| 0x0064  CURRENT_MA  Low 16 bits indicate card current consumption in mA   |
+| 0x006C  DISP_SEL    Display mode (0=hexdisp, 1=current)         DSP       |
+| 0x00C0  PLL_CTL  bit31=ext_clk, remainder unused.                         |
+| 0x00C4  PLL_STAT                            LOCK                          |
+| 0x00C8  CLK_FREQ                                                          |
+| 0x00CC  CLK_CNT                                                           |
+|                                                                           |
+| *Notes: HSMODE is not implemented, never set this bit!                    |
+| BSWAP is available in rev >= 8                                            |
+|                                                                           |
++---------------------------------------------------------------------------+
+*/
+
+typedef volatile struct {
+	uint32 spih_ctrl;		/* 0x00 SPI Control Register */
+	uint32 spih_stat;		/* 0x04 SPI Status Register */
+	uint32 spih_data;		/* 0x08 SPI Data Register, 32-bits wide */
+	uint32 spih_ext;		/* 0x0C SPI Extension Register */
+	uint32 PAD[4];			/* 0x10-0x1F PADDING */
+
+	uint32 spih_gpio_ctrl;		/* 0x20 SPI GPIO Control Register */
+	uint32 spih_gpio_data;		/* 0x24 SPI GPIO Data Register */
+	uint32 PAD[6];			/* 0x28-0x3F PADDING */
+
+	uint32 spih_int_edge;		/* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+	uint32 spih_int_pol;		/* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+							/* 1=Active High) */
+	uint32 spih_int_mask;		/* 0x48 SPI Interrupt Mask */
+	uint32 spih_int_status;		/* 0x4C SPI Interrupt Status */
+	uint32 PAD[4];			/* 0x50-0x5F PADDING */
+
+	uint32 spih_hex_disp;		/* 0x60 SPI 4-digit hex display value */
+	uint32 spih_current_ma;		/* 0x64 SPI SD card current consumption in mA */
+	uint32 PAD[1];			/* 0x68 PADDING */
+	uint32 spih_disp_sel;		/* 0x6c SPI 4-digit hex display mode select (1=current) */
+	uint32 PAD[4];			/* 0x70-0x7F PADDING */
+	uint32 PAD[8];			/* 0x80-0x9F PADDING */
+	uint32 PAD[8];			/* 0xA0-0xBF PADDING */
+	uint32 spih_pll_ctrl;	/* 0xC0 PLL Control Register */
+	uint32 spih_pll_status;	/* 0xC4 PLL Status Register */
+	uint32 spih_xtal_freq;	/* 0xC8 External Clock Frequency in units of 10000Hz */
+	uint32 spih_clk_count;	/* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+	uint32 cfg_space[0x40];		/* 0x000-0x0FF PCI Configuration Space (Read Only) */
+	uint32 P_IMG_CTRL0;		/* 0x100 PCI Image0 Control Register */
+
+	uint32 P_BA0;			/* 0x104 32 R/W PCI Image0 Base Address register */
+	uint32 P_AM0;			/* 0x108 32 R/W PCI Image0 Address Mask register */
+	uint32 P_TA0;			/* 0x10C 32 R/W PCI Image0 Translation Address register */
+	uint32 P_IMG_CTRL1;		/* 0x110 32 R/W PCI Image1 Control register */
+	uint32 P_BA1;			/* 0x114 32 R/W PCI Image1 Base Address register */
+	uint32 P_AM1;			/* 0x118 32 R/W PCI Image1 Address Mask register */
+	uint32 P_TA1;			/* 0x11C 32 R/W PCI Image1 Translation Address register */
+	uint32 P_IMG_CTRL2;		/* 0x120 32 R/W PCI Image2 Control register */
+	uint32 P_BA2;			/* 0x124 32 R/W PCI Image2 Base Address register */
+	uint32 P_AM2;			/* 0x128 32 R/W PCI Image2 Address Mask register */
+	uint32 P_TA2;			/* 0x12C 32 R/W PCI Image2 Translation Address register */
+	uint32 P_IMG_CTRL3;		/* 0x130 32 R/W PCI Image3 Control register */
+	uint32 P_BA3;			/* 0x134 32 R/W PCI Image3 Base Address register */
+	uint32 P_AM3;			/* 0x138 32 R/W PCI Image3 Address Mask register */
+	uint32 P_TA3;			/* 0x13C 32 R/W PCI Image3 Translation Address register */
+	uint32 P_IMG_CTRL4;		/* 0x140 32 R/W PCI Image4 Control register */
+	uint32 P_BA4;			/* 0x144 32 R/W PCI Image4 Base Address register */
+	uint32 P_AM4;			/* 0x148 32 R/W PCI Image4 Address Mask register */
+	uint32 P_TA4;			/* 0x14C 32 R/W PCI Image4 Translation Address register */
+	uint32 P_IMG_CTRL5;		/* 0x150 32 R/W PCI Image5 Control register */
+	uint32 P_BA5;			/* 0x154 32 R/W PCI Image5 Base Address register */
+	uint32 P_AM5;			/* 0x158 32 R/W PCI Image5 Address Mask register */
+	uint32 P_TA5;			/* 0x15C 32 R/W PCI Image5 Translation Address register */
+	uint32 P_ERR_CS;		/* 0x160 32 R/W PCI Error Control and Status register */
+	uint32 P_ERR_ADDR;		/* 0x164 32 R PCI Erroneous Address register */
+	uint32 P_ERR_DATA;		/* 0x168 32 R PCI Erroneous Data register */
+
+	uint32 PAD[5];			/* 0x16C-0x17F PADDING */
+
+	uint32 WB_CONF_SPC_BAR;		/* 0x180 32 R WISHBONE Configuration Space Base Address */
+	uint32 W_IMG_CTRL1;		/* 0x184 32 R/W WISHBONE Image1 Control register */
+	uint32 W_BA1;			/* 0x188 32 R/W WISHBONE Image1 Base Address register */
+	uint32 W_AM1;			/* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+	uint32 W_TA1;			/* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+	uint32 W_IMG_CTRL2;		/* 0x194 32 R/W WISHBONE Image2 Control register */
+	uint32 W_BA2;			/* 0x198 32 R/W WISHBONE Image2 Base Address register */
+	uint32 W_AM2;			/* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+	uint32 W_TA2;			/* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+	uint32 W_IMG_CTRL3;		/* 0x1A4 32 R/W WISHBONE Image3 Control register */
+	uint32 W_BA3;			/* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+	uint32 W_AM3;			/* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+	uint32 W_TA3;			/* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+	uint32 W_IMG_CTRL4;		/* 0x1B4 32 R/W WISHBONE Image4 Control register */
+	uint32 W_BA4;			/* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+	uint32 W_AM4;			/* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+	uint32 W_TA4;			/* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+	uint32 W_IMG_CTRL5;		/* 0x1C4 32 R/W WISHBONE Image5 Control register */
+	uint32 W_BA5;			/* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+	uint32 W_AM5;			/* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+	uint32 W_TA5;			/* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+	uint32 W_ERR_CS;		/* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+	uint32 W_ERR_ADDR;		/* 0x1D8 32 R WISHBONE Erroneous Address register */
+	uint32 W_ERR_DATA;		/* 0x1DC 32 R WISHBONE Erroneous Data register */
+	uint32 CNF_ADDR;		/* 0x1E0 32 R/W Configuration Cycle register */
+	uint32 CNF_DATA;		/* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+	uint32 INT_ACK;			/* 0x1E8 32 R Interrupt Acknowledge register */
+	uint32 ICR;			/* 0x1EC 32 R/W Interrupt Control register */
+	uint32 ISR;			/* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN		(1 << 0)	/* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN	(1 << 1)	/* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN	(1 << 2)	/* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN	(1 << 3)	/* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN	(1 << 4)	/* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET	(1U << 31)	/* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST		(1 << 0)	/* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST	(1 << 1)	/* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST	(1 << 2)	/* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST	(1 << 3)	/* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST	(1 << 4)	/* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR		(1 << 0)	/* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR		(1 << 1)	/* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR		(1 << 2)	/* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS				(1 << 0)	/* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER		(1 << 1)	/* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT	(1 << 2)	/* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK		0x30		/* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT	4		/* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL			(1 << 3)	/* SPI Write FIFO Full */
+#define SPIH_WFEMPTY		(1 << 2)	/* SPI Write FIFO Empty */
+#define SPIH_RFFULL			(1 << 1)	/* SPI Read FIFO Full */
+#define SPIH_RFEMPTY		(1 << 0)	/* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK		(1U << 31)	/* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK		(1 << 1)	/* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED		(1 << 3)	/* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND		0xf4240		/* 1 million */
diff --git a/drivers/net/wireless/bcm4329/include/bcmperf.h b/drivers/net/wireless/bcm4329/include/bcmperf.h
new file mode 100644
index 0000000..2a78784
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h,v 13.5 2007/09/14 22:00:59 Exp $
+ */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define	BCMPERF_GETICACHE_MISS(x)	((x) = 0)
+#define	BCMPERF_GETICACHE_HIT(x)	((x) = 0)
+#define	BCMPERF_GETINSTRCOUNT(x)	((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdbus.h b/drivers/net/wireless/bcm4329/include/bcmsdbus.h
new file mode 100644
index 0000000..b7b67bc
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdbus.h
@@ -0,0 +1,117 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h,v 13.11.14.2.6.6 2009/10/27 17:20:28 Exp $
+ */
+
+#ifndef	_sdio_api_h_
+#define	_sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS                          (0x00)
+#define SDIOH_API_RC_FAIL	                      (0x01)
+#define SDIOH_API_SUCCESS(status) (status == 0)
+
+#define SDIOH_READ              0	/* Read request */
+#define SDIOH_WRITE             1	/* Write request */
+
+#define SDIOH_DATA_FIX          0	/* Fixed addressing */
+#define SDIOH_DATA_INC          1	/* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL   0       /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND   1       /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU  2       /* Cut-through command */
+
+#define SDIOH_DATA_PIO          0       /* PIO mode */
+#define SDIOH_DATA_DMA          1       /* DMA mode */
+
+
+typedef int SDIOH_API_RC;
+
+/* SDIO host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+/* Attach; returns a handle on success, NULL on failure.
+ *  The handle must be passed to all subsequent calls; no local copy is cached.
+ *  cfghdl points to the starting address of the PCI device's mapped memory.
+ */
+extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+	uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+	uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+	void *pkt);
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                          void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+/* Helper function */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
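+
+/* Editor's note: illustrative sketch, not part of the original header.
+ * example_read_cccr() is a hypothetical wrapper reading one byte from the
+ * common register space (function 0, offset 0x00 = CCCR/SDIO revision)
+ * via CMD52.
+ */
+#if 0
+static SDIOH_API_RC
+example_read_cccr(sdioh_info_t *si, uint8 *rev)
+{
+	return sdioh_request_byte(si, SDIOH_READ, 0, 0x00, rev);
+}
+#endif /* 0 - usage sketch only */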
+
+
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdh.h b/drivers/net/wireless/bcm4329/include/bcmsdh.h
new file mode 100644
index 0000000..f5dee5c
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdh.h
@@ -0,0 +1,208 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ *     export functions to client drivers
+ *     abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h,v 13.35.14.7.6.8 2009/10/14 04:22:25 Exp $
+ */
+
+#ifndef	_bcmsdh_h_
+#define	_bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL	0x0001 /* Error */
+#define BCMSDH_INFO_VAL		0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+/* Attach and build an interface to the underlying SD host driver.
+ *  - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
+ *  - Returns the bcmsdh handle and virtual address base for register access.
+ *    The returned handle should be used in all subsequent calls, but the bcmsdh
+ *    implementation may maintain a single "default" handle (e.g. the first or
+ *    most recent one) to enable single-instance implementations to pass NULL.
+ */
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq);
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+#ifdef BCMLXSDMMC
+extern int bcmsdh_claim_host_and_lock(void *sdh);
+extern int bcmsdh_release_host_and_unlock(void *sdh);
+#endif /* BCMLXSDMMC */
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ *   fn:   function number
+ *   addr: unmodified SDIO-space address
+ *   data: data byte to write
+ *   err:  pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ *   fn:     function whose CIS is being requested (0 is common CIS)
+ *   cis:    pointer to memory location to place results
+ *   length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ *   addr: backplane address (i.e. >= regsva from attach)
+ *   size: register width in bytes (2 or 4)
+ *   data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
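+
+/* Editor's note: illustrative sketch; example_read_core_reg() is a
+ * hypothetical helper showing a checked 4-byte core register read.  On
+ * failure the returned value must not be trusted, hence the
+ * bcmsdh_regfail() check.
+ */
+#if 0
+static int
+example_read_core_reg(void *sdh, uint32 addr, uint32 *val)
+{
+	*val = bcmsdh_reg_read(sdh, addr, sizeof(uint32));
+	return bcmsdh_regfail(sdh) ? -1 : 0;
+}
+#endif /* 0 - usage sketch only */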
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ *   fn:       function number
+ *   addr:     backplane address (i.e. >= regsva from attach)
+ *   flags:    backplane width, address increment, sync/async
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ *   pkt:      pointer to packet associated with buf (if any)
+ *   complete: callback function for command completion (async only)
+ *   handle:   handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete, void *handle);
+
+/* Flags bits */
+#define SDIO_REQ_4BYTE	0x1	/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED	0x2	/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC	0x4	/* Async request (vs. sync request) */
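+/* Synchronous CMD53 transfer sketch (editorial addition; addr, buf and
+ * nbytes are placeholders, function 2 is commonly the data function, and the
+ * async path is omitted since the note above says it is unsupported):
+ *
+ *   uint flags = SDIO_REQ_4BYTE;   4-byte width, incrementing address, sync
+ *   int ret = bcmsdh_send_buf(sdh, addr, 2, flags,
+ *                             buf, nbytes, NULL, NULL, NULL);
+ *   if (ret == 0)
+ *       ret = bcmsdh_recv_buf(sdh, addr, 2, flags,
+ *                             buf, nbytes, NULL, NULL, NULL);
+ */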
+
+/* Pending (non-error) return code */
+#define BCME_PENDING	1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ *   rw:       read or write (0/1)
+ *   addr:     direct SDIO address
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+                           void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+/* callback functions */
+typedef struct {
+	/* attach to device */
+	void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+	                uint16 func, uint bustype, void * regsva, osl_t * osh,
+	                void * param);
+	/* detach from device */
+	void (*detach)(void *ch);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
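+/* Driver registration sketch (editorial addition; my_probe and my_remove are
+ * hypothetical callbacks matching the attach/detach signatures above):
+ *
+ *   static bcmsdh_driver_t my_driver = { my_probe, my_remove };
+ *
+ *   bcmsdh_register(&my_driver);    hand the callbacks to the platform glue
+ *   ...
+ *   bcmsdh_unregister();
+ */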
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_register_oob_intr(void * dhdp);
+extern void bcmsdh_unregister_oob_intr(void);
+extern void bcmsdh_oob_intr_set(bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chip id and rev to lower layers for controlling PR workarounds */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+#endif	/* _bcmsdh_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h
new file mode 100644
index 0000000..4e6d1b5
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdh_sdmmc.h
@@ -0,0 +1,122 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h,v 13.1.2.1.8.7 2009/10/27 18:22:52 Exp $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+
+/* Allocate/init/free per-OS private data */
+extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
+extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4		2
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+struct sdioh_info {
+	osl_t 		*osh;			/* osh handler */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint 		irq;			/* Client irq */
+	int 		intrcount;		/* Client interrupts */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	uint		max_dma_len;
+	uint		max_dma_descriptors;	/* DMA Descriptors supported by this controller. */
+//	SDDMA_DESCRIPTOR	SGList[32];	/* Scatter/Gather DMA List */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+	sdioh_info_t	*sd;
+	struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdpcm.h b/drivers/net/wireless/bcm4329/include/bcmsdpcm.h
new file mode 100644
index 0000000..77aca45
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdpcm.h
@@ -0,0 +1,263 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h,v 1.1.2.4 2010/07/02 01:15:46 Exp $
+ */
+
+#ifndef	_bcmsdpcm_h_
+#define	_bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK	I_SMB_SW0	/* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK	I_SMB_SW1	/* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB	I_SMB_SW2	/* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT	I_SMB_SW3	/* To SB Mailbox Miscellaneous Interrupt */
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK		(1 << 0)	/* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK	(1 << 1)	/* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB	(1 << 2)	/* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT	(1 << 3)	/* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK	0x0000000f	/* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK	0x00ff0000	/* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT	16		/* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE	I_HMB_SW0	/* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE	I_HMB_SW1	/* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND	I_HMB_SW2	/* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT	I_HMB_SW3	/* To Host Mailbox Miscellaneous Interrupt */
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON	(1 << 0)	/* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE	(1 << 1)	/* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND	(1 << 2)	/* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT	(1 << 3)	/* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK	0x0000000f	/* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED	1	/* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY	2	/* we're ready to talk to host after enable */
+#define HMB_DATA_FC		4	/* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY	8	/* firmware is ready for protocol activity */
+
+#define HMB_DATA_FCDATA_MASK	0xff000000	/* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT	24		/* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK	0x00ff0000	/* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT	16		/* device protocol version (with devready) */
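+/* Mailbox-data decode sketch (editorial addition; hmb_data is a placeholder
+ * for the word read from tohostmailboxdata):
+ *
+ *   if (hmb_data & HMB_DATA_FC) {
+ *       uint8 fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
+ *                      HMB_DATA_FCDATA_SHIFT;
+ *       ... apply per-prio flow control update ...
+ *   }
+ *   if (hmb_data & HMB_DATA_DEVREADY)
+ *       ... protocol version is (hmb_data & HMB_DATA_VERSION_MASK) >>
+ *           HMB_DATA_VERSION_SHIFT ...
+ */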
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK		0x000000ff	/* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK		0x00000f00	/* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT		8		/* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK		0x0000f000	/* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT		12		/* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK		0x00ff0000	/* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT		16		/* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET		2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET		3		/* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) 		(((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK		0xff000000
+#define SDPCM_DOFFSET_SHIFT		24
+
+#define SDPCM_FCMASK_OFFSET		4		/* Flow control */
+#define SDPCM_FCMASK_VALUE(p)		(((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET		5		/* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p)		(((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET		6		/* Version # */
+#define SDPCM_VERSION_VALUE(p)		(((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET		7		/* Spare */
+#define SDPCM_UNUSED_VALUE(p)		(((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN	8	/* SW header is 64 bits */
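+/* SW-header decode sketch (editorial addition). With swh pointing at the
+ * first byte of the software header (i.e. just past the hardware frame tag),
+ * the accessor macros above pull out the individual fields:
+ *
+ *   uint8 seq     = SDPCM_PACKET_SEQUENCE(swh);
+ *   uint8 chan    = SDPCM_PACKET_CHANNEL(swh);
+ *   uint  nextlen = SDPCM_NEXTLEN_VALUE(swh);   in bytes, 16-byte granularity
+ *   uint8 doff    = SDPCM_DOFFSET_VALUE(swh);   payload offset from SOF
+ */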
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL	0	/* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL	1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL	3	/* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL	15	/* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL	15
+
+#define SDPCM_SEQUENCE_WRAP	256	/* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0	0x01
+#define SDPCM_FLAG_RESVD1	0x02
+#define SDPCM_FLAG_GSPI_TXENAB	0x04
+#define SDPCM_FLAG_GLOMDESC	0x08	/* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG	(SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p)	(((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN	4	/* Generally: Cmd(1), Ext(1), Len(2);
+					 * Semantics of Ext byte depend on command.
+					 * Len is current or requested frame length, not
+					 * including test header; sent little-endian.
+					 */
+#define SDPCM_TEST_DISCARD	0x01	/* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ	0x02	/* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP	0x03	/* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST	0x04	/* Receiver to send a burst. Ext is a frame count */
+#define SDPCM_TEST_SEND		0x05	/* Receiver sets send mode. Ext is boolean on/off */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id)	((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+	uint32 cmd52rd;		/* Cmd52RdCount, SDIO: cmd52 reads */
+	uint32 cmd52wr;		/* Cmd52WrCount, SDIO: cmd52 writes */
+	uint32 cmd53rd;		/* Cmd53RdCount, SDIO: cmd53 reads */
+	uint32 cmd53wr;		/* Cmd53WrCount, SDIO: cmd53 writes */
+	uint32 abort;		/* AbortCount, SDIO: aborts */
+	uint32 datacrcerror;	/* DataCrcErrorCount, SDIO: frames w/CRC error */
+	uint32 rdoutofsync;	/* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+	uint32 writebusy;	/* WriteBusyCount, SDIO: device asserted "busy" */
+	uint32 readwait;	/* ReadWaitCount, SDIO: no data ready for a read cmd */
+	uint32 readterm;	/* ReadTermCount, SDIO: read frame termination cmds */
+	uint32 writeterm;	/* WriteTermCount, SDIO: write frame termination cmds */
+	uint32 rxdescuflo;	/* receive descriptor underflows */
+	uint32 rxfifooflo;	/* receive fifo overflows */
+	uint32 txfifouflo;	/* transmit fifo underflows */
+	uint32 runt;		/* runt (too short) frames recv'd from bus */
+	uint32 badlen;		/* frame's rxh len does not match its hw tag len */
+	uint32 badcksum;	/* frame's hw tag chksum doesn't agree with len value */
+	uint32 seqbreak;	/* break in sequence # space from one rx frame to the next */
+	uint32 rxfcrc;		/* frame rx header indicates crc error */
+	uint32 rxfwoos;		/* frame rx header indicates write out of sync */
+	uint32 rxfwft;		/* frame rx header indicates write frame termination */
+	uint32 rxfabort;	/* frame rx header indicates frame aborted */
+	uint32 woosint;		/* write out of sync interrupt */
+	uint32 roosint;		/* read out of sync interrupt */
+	uint32 rftermint;	/* read frame terminate interrupt */
+	uint32 wftermint;	/* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val)	((var) == (val))
+#define SDIODREV_GE(var, val)	((var) >= (val))
+#define SDIODREV_GT(var, val)	((var) > (val))
+#define SDIODREV_LT(var, val)	((var) < (val))
+#define SDIODREV_LE(var, val)	((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+	(SDIODREV_LT((h)->corerev, 1) ? \
+	 SDIODDMAREG32((h), (dir), (chnl)) : \
+	 SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODDMAREG(h, dir, chnl) : \
+	 PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+	(SDIODREV_LT((corerev), 1) ? \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+	((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODFIFOREG(h, corerev) : \
+	 PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host
+ * The structure contains pointers to trap or assert information shared with the host
+ */
+#define SDPCM_SHARED_VERSION       0x0002
+#define SDPCM_SHARED_VERSION_MASK  0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT  0x0100
+#define SDPCM_SHARED_ASSERT        0x0200
+#define SDPCM_SHARED_TRAP          0x0400
+
+typedef struct {
+	uint32	flags;
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hndrte_cons_t */
+	uint32  msgtrace_addr;
+	uint8   tag[32];
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
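+/* Host-side consumption sketch (editorial addition). After copying the
+ * shared area out of dongle memory, the host can check the version and look
+ * for trap/assert indications:
+ *
+ *   sdpcm_shared_t sh;
+ *   ... read the shared structure from the dongle into sh ...
+ *   if ((sh.flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION)
+ *       return BCME_VERSION;
+ *   if (sh.flags & SDPCM_SHARED_TRAP)
+ *       ... dump trap info located at sh.trap_addr ...
+ */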
+
+#endif	/* _bcmsdpcm_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdspi.h b/drivers/net/wireless/bcm4329/include/bcmsdspi.h
new file mode 100644
index 0000000..eaae10d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdspi.h
@@ -0,0 +1,131 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h,v 13.8.10.2 2008/06/30 21:09:40 Exp $
+ */
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint		bar0;			/* BAR0 for PCI Device */
+	osl_t 		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+
+	uint		lockcount; 		/* nest count of sdspi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	uint32 		intrcount;		/* Client interrupts */
+	uint32 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	bool		got_hcint;		/* Host Controller interrupt. */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current register transfer size */
+	uint32		cmd53_wr_data;		/* Used to pass CMD53 write data */
+	uint32		card_response;		/* Used to pass back response status byte */
+	uint32		card_rsp_data;		/* Used to pass back response data word */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
diff --git a/drivers/net/wireless/bcm4329/include/bcmsdstd.h b/drivers/net/wireless/bcm4329/include/bcmsdstd.h
new file mode 100644
index 0000000..974b3d4
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmsdstd.h
@@ -0,0 +1,223 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h,v 13.16.18.1.16.3 2009/12/10 01:09:23 Exp $
+ */
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+#define SDIOH_MODE_SD1		1
+#define SDIOH_MODE_SD4		2
+
+#define MAX_SLOTS 6 	/* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ	0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK	1
+#define SDIOH_TYPE_BCM27XX	2
+#define SDIOH_TYPE_TI_PCIXX21	4	/* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822	5	/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON	6	/* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS   0x00001E00
+
+#define RETRIES_LARGE 100000
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+#define USE_FIFO		0x8	/* Fifo vs non-fifo */
+
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint32 curr_caps;                    	/* max current capabilities reg */
+
+	osl_t 		*osh;			/* osh handler */
+	volatile char 	*mem_space;		/* pci device memory va */
+	uint		lockcount; 		/* nest count of sdstd_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint		target_dev;		/* Target device ID */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	int 		intrcount;		/* Client interrupts */
+	int 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current transfer */
+	uint16 		card_rca;		/* Current Address */
+	int8		sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;		/* DMA Buffer virtual address */
+	ulong		dma_phys;		/* DMA Buffer physical address */
+	void		*adma2_dscr_buf;	/* ADMA2 Descriptor Buffer virtual address */
+	ulong		adma2_dscr_phys;	/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void		*dma_start_buf;
+	ulong		dma_start_phys;
+	uint		alloced_dma_size;
+	void		*adma2_dscr_start_buf;
+	ulong		adma2_dscr_start_phys;
+	uint		alloced_adma2_dscr_size;
+
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	bool		got_hcint;		/* local interrupt flag */
+	uint16		last_intrstatus;	/* to cache intrstatus */
+};
+
+#define DMA_MODE_NONE	0
+#define DMA_MODE_SDMA	1
+#define DMA_MODE_ADMA1	2
+#define DMA_MODE_ADMA2	3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO	-1
+
+#define USE_DMA(sd)		((bool)(((sd)->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE			0
+#define SDIOH_ADMA1_MODE		1
+#define SDIOH_ADMA2_MODE		2
+#define SDIOH_ADMA2_64_MODE		3
+
+#define ADMA2_ATTRIBUTE_VALID		(1 << 0)	/* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END			(1 << 1)	/* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT			(1 << 2)	/* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP		(0 << 4)	/* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV		(1 << 4)	/* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET		(1 << 4)	/* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN	(2 << 4)	/* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK	(3 << 4)	/* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+	uint32 len_attr;
+	uint32 phys_addr;
+} adma2_dscr_32b_t;
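+/* ADMA2 descriptor fill sketch (editorial addition; assumes the standard SD
+ * Host Controller layout with the 16-bit transfer length in the upper half
+ * of len_attr and the attribute bits in the lower half):
+ *
+ *   adma2_dscr_32b_t d;
+ *   d.phys_addr = (uint32)buf_phys;
+ *   d.len_attr  = ((uint32)nbytes << 16) |
+ *                 ADMA2_ATTRIBUTE_ACT_TRAN |
+ *                 ADMA2_ATTRIBUTE_VALID |
+ *                 ADMA2_ATTRIBUTE_END;      last entry of the table
+ */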
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+	uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern uint16 sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield);
diff --git a/drivers/net/wireless/bcm4329/include/bcmspi.h b/drivers/net/wireless/bcm4329/include/bcmspi.h
new file mode 100644
index 0000000..2e2bc93
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmspi.h
@@ -0,0 +1,36 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h,v 13.3.10.2 2008/06/30 21:09:40 Exp $
+ */
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
diff --git a/drivers/net/wireless/bcm4329/include/bcmspibrcm.h b/drivers/net/wireless/bcm4329/include/bcmspibrcm.h
new file mode 100644
index 0000000..9dce878
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmspibrcm.h
@@ -0,0 +1,134 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 2010, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: bcmspibrcm.h,v 1.4.4.1.4.3.6.1 2008/09/27 17:03:25 Exp $
+ */
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_F1		64
+#define BLOCK_SIZE_F2 		2048
+#define BLOCK_SIZE_F3 		2048
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+#define ERROR_UF	2
+#define ERROR_OF	3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint 		cfg_bar;		/* pci cfg address for bar */
+	uint32		caps;			/* cached value of capabilities reg */
+	void		*bar0;			/* BAR0 for PCI Device */
+	osl_t 		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+
+	uint		lockcount; 		/* nest count of spi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	uint32 		intrcount;		/* Client interrupts */
+	uint32 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SPI_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current transfer */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		card_dstatus;		/* 32bit device status */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SPI_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	uint32		wordlen;			/* host processor 16/32bits */
+	uint32		prev_fun;
+	uint32		chip;
+	uint32		chiprev;
+	bool		resp_delay_all;
+	bool		dwordmode;
+
+	struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M			BITFIELD_MASK(1)	/* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S			31
+#define SPI_ACCESS_M			BITFIELD_MASK(1)	/* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S			30
+#define SPI_FUNCTION_M			BITFIELD_MASK(2)	/* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S			28
+#define SPI_REG_ADDR_M			BITFIELD_MASK(17)	/* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S			11
+#define SPI_LEN_M			BITFIELD_MASK(11)	/* Bit [10:0] - Packet length */
+#define SPI_LEN_S			0
diff --git a/drivers/net/wireless/bcm4329/include/bcmutils.h b/drivers/net/wireless/bcm4329/include/bcmutils.h
new file mode 100644
index 0000000..f85ed35
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmutils.h
@@ -0,0 +1,637 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.h,v 13.184.4.6.2.1.18.25 2010/04/26 06:05:24 Exp $
+ */
+
+
+#ifndef	_bcmutils_h_
+#define	_bcmutils_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define _BCM_U	0x01	/* upper */
+#define _BCM_L	0x02	/* lower */
+#define _BCM_D	0x04	/* digit */
+#define _BCM_C	0x08	/* cntrl */
+#define _BCM_P	0x10	/* punct */
+#define _BCM_S	0x20	/* white space (space/lf/tab) */
+#define _BCM_X	0x40	/* hex digit */
+#define _BCM_SP	0x80	/* hard space (0x20) */
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x)	(bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c)	((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c)	((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c)	((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c)	((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c)	((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c)	((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c)	((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c)	(bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c)	(bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+
+
+struct bcmstrbuf {
+	char *buf;		/* pointer to current position in origbuf */
+	unsigned int size;	/* current (residual) size of the buffer */
+	char *origbuf;		/* unmodified pointer to the original buffer */
+	unsigned int origsize;	/* unmodified original buffer size */
+};
+
+
+#ifdef BCMDRIVER
+#include <osl.h>
+
+#define GPIO_PIN_NOTDEFINED 	0x20	
+
+
+#define SPINWAIT(exp, us) { \
+	uint countdown = (us) + 9; \
+	while ((exp) && (countdown >= 10)) {\
+		OSL_DELAY(10); \
+		countdown -= 10; \
+	} \
+}
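+/* SPINWAIT usage sketch (editorial addition): poll a condition in 10 us
+ * steps for roughly `us' microseconds, then re-check to see why the loop
+ * exited. R_REG and STATUS_BUSY below are placeholders for the caller's own
+ * register read and busy bit.
+ *
+ *   SPINWAIT((R_REG(osh, &regs->status) & STATUS_BUSY), 1000);
+ *   if (R_REG(osh, &regs->status) & STATUS_BUSY)
+ *       ... timed out ...
+ */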
+
+
+
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT        128	
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC           16	
+#endif
+
+typedef struct pktq_prec {
+	void *head;     /* first packet to dequeue */
+	void *tail;     /* last packet to dequeue */
+	uint16 len;     /* number of queued packets */
+	uint16 max;     /* maximum number of queued packets */
+} pktq_prec_t;
+
+
+
+struct pktq {
+	uint16 num_prec;        /* number of precedences in use */
+	uint16 hi_prec;         /* rapid dequeue hint (>= highest non-empty prec) */
+	uint16 max;             /* total max packets */
+	uint16 len;             /* total number of packets */
+	
+	struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+
+struct spktq {
+	uint16 num_prec;        
+	uint16 hi_prec;         
+	uint16 max;             
+	uint16 len;             
+	
+	struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec)        for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+
+
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+
+
+#define pktq_psetmax(pq, prec, _max)    ((pq)->q[prec].max = (_max))
+#define pktq_plen(pq, prec)             ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec)           ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec)            ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec)           ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec)            ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec)       ((pq)->q[prec].tail)
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir);
+
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir);
+
+
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+
+
+#define pktq_len(pq)                    ((int)(pq)->len)
+#define pktq_max(pq)                    ((int)(pq)->max)
+#define pktq_avail(pq)                  ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq)                   ((pq)->len >= (pq)->max)
+#define pktq_empty(pq)                  ((pq)->len == 0)
+
+
+#define pktenq(pq, p)		pktq_penq(((struct pktq *)pq), 0, (p))
+#define pktenq_head(pq, p)	pktq_penq_head(((struct pktq *)pq), 0, (p))
+#define pktdeq(pq)		pktq_pdeq(((struct pktq *)pq), 0)
+#define pktdeq_tail(pq)		pktq_pdeq_tail(((struct pktq *)pq), 0)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
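+/* Packet-queue usage sketch (editorial addition; pkt and prec are
+ * placeholders for an OSL-allocated packet and its precedence):
+ *
+ *   struct pktq q;
+ *   int prec_out;
+ *   void *p;
+ *
+ *   pktq_init(&q, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT);
+ *   if (!pktq_pfull(&q, prec))
+ *       pktq_penq(&q, prec, pkt);
+ *   p = pktq_mdeq(&q, 0xffff, &prec_out);   dequeue from any precedence
+ */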
+
+
+
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+#define	PKTPRIO_VDSCP	0x100		
+#define	PKTPRIO_VLAN	0x200		
+#define	PKTPRIO_UPD	0x400		
+#define	PKTPRIO_DSCP	0x800		
+
+
+extern int bcm_atoi(char *s);
+extern ulong bcm_strtoul(char *cp, char **endp, uint base);
+extern char *bcmstrstr(char *haystack, char *needle);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(char *p, struct ether_addr *ea);
+
+
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+
+
+extern void bcm_mdelay(uint ms);
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define	bcmlog(fmt, a1, a2)
+#define	bcmdumplog(buf, size)	*buf = '\0'
+#define	bcmdumplogent(buf, idx)	-1
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+
+
+
+
+typedef struct bcm_iovar {
+	const char *name;	/* name for lookup and display */
+	uint16 varid;		/* id for switch */
+	uint16 flags;		/* driver-specific flags */
+	uint16 type;		/* base type of argument (IOVT_*) */
+	uint16 minlen;		/* min length for buffer vars */
+} bcm_iovar_t;
+
+
+
+
+#define IOV_GET 0	/* action: get an iovar value */
+#define IOV_SET 1	/* action: set an iovar value */
+
+
+#define IOV_GVAL(id)		((id)*2)
+#define IOV_SVAL(id)		(((id)*2)+IOV_SET)
+#define IOV_ISSET(actionid)	((actionid & IOV_SET) == IOV_SET)
+
+
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
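+/* IOVar table sketch (editorial addition; "myvar" and IOV_MYVAR are
+ * hypothetical). A module declares a NULL-terminated table, looks up the
+ * incoming name, length-checks the argument and then switches on the action
+ * id built with IOV_GVAL()/IOV_SVAL():
+ *
+ *   enum { IOV_MYVAR = 1 };
+ *   static const bcm_iovar_t my_iovars[] = {
+ *       { "myvar", IOV_MYVAR, 0, IOVT_UINT32, 0 },
+ *       { NULL, 0, 0, 0, 0 }
+ *   };
+ *
+ *   const bcm_iovar_t *vi = bcm_iovar_lookup(my_iovars, name);
+ *   if (vi && bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid)) == 0)
+ *       ... handle IOV_GVAL(IOV_MYVAR) / IOV_SVAL(IOV_MYVAR) ...
+ */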
+
+#endif	/* BCMDRIVER */
+
+
+#define IOVT_VOID	0	
+#define IOVT_BOOL	1	
+#define IOVT_INT8	2	
+#define IOVT_UINT8	3	
+#define IOVT_INT16	4	
+#define IOVT_UINT16	5	
+#define IOVT_INT32	6	
+#define IOVT_UINT32	7	
+#define IOVT_BUFFER	8	
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+
+#define BCM_IOV_TYPE_INIT { \
+	"void", \
+	"bool", \
+	"int8", \
+	"uint8", \
+	"int16", \
+	"uint16", \
+	"int32", \
+	"uint32", \
+	"buffer", \
+	"" }
+
+#define BCM_IOVT_IS_INT(type) (\
+	(type == IOVT_BOOL) || \
+	(type == IOVT_INT8) || \
+	(type == IOVT_UINT8) || \
+	(type == IOVT_INT16) || \
+	(type == IOVT_UINT16) || \
+	(type == IOVT_INT32) || \
+	(type == IOVT_UINT32))
+
+
+
+#define BCME_STRLEN 		64	
+#define VALID_BCMERROR(e)  ((e <= 0) && (e >= BCME_LAST))
+
+
+
+
+#define BCME_OK				0	
+#define BCME_ERROR			-1	
+#define BCME_BADARG			-2	
+#define BCME_BADOPTION			-3	
+#define BCME_NOTUP			-4	
+#define BCME_NOTDOWN			-5	
+#define BCME_NOTAP			-6	
+#define BCME_NOTSTA			-7	
+#define BCME_BADKEYIDX			-8	
+#define BCME_RADIOOFF 			-9	
+#define BCME_NOTBANDLOCKED		-10	
+#define BCME_NOCLK			-11	
+#define BCME_BADRATESET			-12	
+#define BCME_BADBAND			-13	
+#define BCME_BUFTOOSHORT		-14	
+#define BCME_BUFTOOLONG			-15	
+#define BCME_BUSY			-16	
+#define BCME_NOTASSOCIATED		-17	
+#define BCME_BADSSIDLEN			-18	
+#define BCME_OUTOFRANGECHAN		-19	
+#define BCME_BADCHAN			-20	
+#define BCME_BADADDR			-21	
+#define BCME_NORESOURCE			-22	
+#define BCME_UNSUPPORTED		-23	
+#define BCME_BADLEN			-24	
+#define BCME_NOTREADY			-25	
+#define BCME_EPERM			-26	
+#define BCME_NOMEM			-27	
+#define BCME_ASSOCIATED			-28	
+#define BCME_RANGE			-29	
+#define BCME_NOTFOUND			-30	
+#define BCME_WME_NOT_ENABLED		-31	
+#define BCME_TSPEC_NOTFOUND		-32	
+#define BCME_ACM_NOTSUPPORTED		-33	
+#define BCME_NOT_WME_ASSOCIATION	-34	
+#define BCME_SDIO_ERROR			-35	
+#define BCME_DONGLE_DOWN		-36	
+#define BCME_VERSION			-37	
+#define BCME_TXFAIL			-38	
+#define BCME_RXFAIL			-39	
+#define BCME_NODEVICE			-40	
+#define BCME_UNFINISHED			-41	
+#define BCME_LAST			BCME_UNFINISHED
+
+
+#define BCMERRSTRINGTABLE {		\
+	"OK",				\
+	"Undefined error",		\
+	"Bad Argument",			\
+	"Bad Option",			\
+	"Not up",			\
+	"Not down",			\
+	"Not AP",			\
+	"Not STA",			\
+	"Bad Key Index",		\
+	"Radio Off",			\
+	"Not band locked",		\
+	"No clock",			\
+	"Bad Rate valueset",		\
+	"Bad Band",			\
+	"Buffer too short",		\
+	"Buffer too long",		\
+	"Busy",				\
+	"Not Associated",		\
+	"Bad SSID len",			\
+	"Out of Range Channel",		\
+	"Bad Channel",			\
+	"Bad Address",			\
+	"Not Enough Resources",		\
+	"Unsupported",			\
+	"Bad length",			\
+	"Not Ready",			\
+	"Not Permitted",		\
+	"No Memory",			\
+	"Associated",			\
+	"Not In Range",			\
+	"Not Found",			\
+	"WME Not Enabled",		\
+	"TSPEC Not Found",		\
+	"ACM Not Supported",		\
+	"Not WME Association",		\
+	"SDIO Bus Error",		\
+	"Dongle Not Accessible",	\
+	"Incorrect version",		\
+	"TX Failure",			\
+	"RX Failure",			\
+	"Device Not Present",		\
+	"Command not finished",		\
+}
+
+#ifndef ABS
+#define	ABS(a)			(((a) < 0)?-(a):(a))
+#endif 
+
+#ifndef MIN
+#define	MIN(a, b)		(((a) < (b))?(a):(b))
+#endif 
+
+#ifndef MAX
+#define	MAX(a, b)		(((a) > (b))?(a):(b))
+#endif 
+
+#define CEIL(x, y)		(((x) + ((y)-1)) / (y))
+#define	ROUNDUP(x, y)		((((x)+((y)-1))/(y))*(y))
+#define	ISALIGNED(a, x)		(((a) & ((x)-1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define	ISPOWEROF2(x)		((((x)-1)&(x)) == 0)
+#define VALID_MASK(mask)	!((mask) & ((mask) + 1))
+#ifndef OFFSETOF
+#define	OFFSETOF(type, member)	((uint)(uintptr)&((type *)0)->member)
+#endif 
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a)		(sizeof(a)/sizeof(a[0]))
+#endif
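+/* Worked examples for the arithmetic helpers above (editorial addition):
+ *   CEIL(10, 4)           -> 3
+ *   ROUNDUP(10, 4)        -> 12
+ *   ISALIGNED(0x1000, 8)  -> true
+ *   ISPOWEROF2(64)        -> true
+ */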
+
+
+#ifndef setbit
+#ifndef NBBY		      
+#define	NBBY	8	
+#endif 
+#define	setbit(a, i)	(((uint8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
+#define	clrbit(a, i)	(((uint8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define	isset(a, i)	(((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
+#define	isclr(a, i)	((((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+#endif 
+
+#define	NBITS(type)	(sizeof(type) * 8)
+#define NBITVAL(nbits)	(1 << (nbits))
+#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
+#define	NBITMASK(nbits)	MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
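+/* Bitmap helper sketch (editorial addition):
+ *
+ *   uint8 map[4] = {0};        32 bits stored as a byte array
+ *   setbit(map, 10);
+ *   if (isset(map, 10))
+ *       clrbit(map, 10);
+ *
+ * MAXBITVAL(4) evaluates to 0x0f and NBITS(uint32) to 32.
+ */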
+
+
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+
+#define MODADD(x, y, bound) \
+    MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+    MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
+
+#define CRC8_INIT_VALUE  0xff		
+#define CRC8_GOOD_VALUE  0x9f		
+#define CRC16_INIT_VALUE 0xffff		
+#define CRC16_GOOD_VALUE 0xf0b8		
+#define CRC32_INIT_VALUE 0xffffffff	
+#define CRC32_GOOD_VALUE 0xdebb20e3	
+
+
+typedef struct bcm_bit_desc {
+	uint32	bit;
+	const char* name;
+} bcm_bit_desc_t;
+
+
+typedef struct bcm_tlv {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} bcm_tlv_t;
+
+
+#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
+
+
+#define ETHER_ADDR_STR_LEN	18	
+
+
+#ifdef IL_BIGENDIAN
+static INLINE uint32
+load32_ua(uint8 *a)
+{
+	return ((a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]);
+}
+
+static INLINE void
+store32_ua(uint8 *a, uint32 v)
+{
+	a[0] = (v >> 24) & 0xff;
+	a[1] = (v >> 16) & 0xff;
+	a[2] = (v >> 8) & 0xff;
+	a[3] = v & 0xff;
+}
+
+static INLINE uint16
+load16_ua(uint8 *a)
+{
+	return ((a[0] << 8) | a[1]);
+}
+
+static INLINE void
+store16_ua(uint8 *a, uint16 v)
+{
+	a[0] = (v >> 8) & 0xff;
+	a[1] = v & 0xff;
+}
+
+#else /* !IL_BIGENDIAN */
+
+static INLINE uint32
+load32_ua(uint8 *a)
+{
+	return ((a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0]);
+}
+
+static INLINE void
+store32_ua(uint8 *a, uint32 v)
+{
+	a[3] = (v >> 24) & 0xff;
+	a[2] = (v >> 16) & 0xff;
+	a[1] = (v >> 8) & 0xff;
+	a[0] = v & 0xff;
+}
+
+static INLINE uint16
+load16_ua(uint8 *a)
+{
+	return ((a[1] << 8) | a[0]);
+}
+
+static INLINE void
+store16_ua(uint8 *a, uint16 v)
+{
+	a[1] = (v >> 8) & 0xff;
+	a[0] = v & 0xff;
+}
+
+#endif /* IL_BIGENDIAN */
+
+
+
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+	if (
+#ifdef __i386__
+	    1 ||
+#endif
+	    (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+		/* All pointers 32-bit aligned (or x86, which tolerates unaligned
+		 * access): XOR one 32-bit word at a time.
+		 */
+		((uint32 *)dst)[0] = ((uint32 *)src1)[0] ^ ((uint32 *)src2)[0];
+		((uint32 *)dst)[1] = ((uint32 *)src1)[1] ^ ((uint32 *)src2)[1];
+		((uint32 *)dst)[2] = ((uint32 *)src1)[2] ^ ((uint32 *)src2)[2];
+		((uint32 *)dst)[3] = ((uint32 *)src1)[3] ^ ((uint32 *)src2)[3];
+	} else {
+		/* Unaligned: XOR byte by byte. */
+		int k;
+		for (k = 0; k < 16; k++)
+			dst[k] = src1[k] ^ src2[k];
+	}
+}
+
+
+
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC)
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+extern void prhex(const char *msg, uchar *buf, uint len);
+#endif 
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+
+extern const char *bcmerrorstr(int bcmerror);
+
+
+typedef uint32 mbool;
+#define mboolset(mb, bit)		((mb) |= (bit))		
+#define mboolclr(mb, bit)		((mb) &= ~(bit))	
+#define mboolisset(mb, bit)		(((mb) & (bit)) != 0)	
+#define	mboolmaskset(mb, mask, val)	((mb) = (((mb) & ~(mask)) | (val)))
+
+
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+
+
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 	offset;
+	uint32 	len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(char *name, const uchar *cdata, int len);
+
+typedef  uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+                          char *buf, uint32 bufsize);
+
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif 
+
+
+#define SSID_FMT_BUF_LEN	((4 * DOT11_MAX_SSID_LEN) + 1)
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif	
diff --git a/drivers/net/wireless/bcm4329/include/bcmwifi.h b/drivers/net/wireless/bcm4329/include/bcmwifi.h
new file mode 100644
index 0000000..038aedc
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/bcmwifi.h
@@ -0,0 +1,154 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * both the wl driver, tools and apps.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi.h,v 1.15.30.4 2010/03/10 20:10:52 Exp $
+ */
+
+
+#ifndef	_bcmwifi_h_
+#define	_bcmwifi_h_
+
+
+
+typedef uint16 chanspec_t;
+
+
+#define CH_UPPER_SB			0x01
+#define CH_LOWER_SB			0x02
+#define CH_EWA_VALID			0x04
+#define CH_20MHZ_APART			4
+#define CH_10MHZ_APART			2
+#define CH_5MHZ_APART			1
+#define CH_MAX_2G_CHANNEL		14	
+#define WLC_MAX_2G_CHANNEL		CH_MAX_2G_CHANNEL 
+#define	MAXCHANNEL		224	
+
+#define WL_CHANSPEC_CHAN_MASK		0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT		0
+
+#define WL_CHANSPEC_CTL_SB_MASK		0x0300
+#define WL_CHANSPEC_CTL_SB_SHIFT	     8
+#define WL_CHANSPEC_CTL_SB_LOWER	0x0100
+#define WL_CHANSPEC_CTL_SB_UPPER	0x0200
+#define WL_CHANSPEC_CTL_SB_NONE		0x0300
+
+#define WL_CHANSPEC_BW_MASK		0x0C00
+#define WL_CHANSPEC_BW_SHIFT		    10
+#define WL_CHANSPEC_BW_10		0x0400
+#define WL_CHANSPEC_BW_20		0x0800
+#define WL_CHANSPEC_BW_40		0x0C00
+
+#define WL_CHANSPEC_BAND_MASK		0xf000
+#define WL_CHANSPEC_BAND_SHIFT		12
+#define WL_CHANSPEC_BAND_5G		0x1000
+#define WL_CHANSPEC_BAND_2G		0x2000
+#define INVCHANSPEC			255
+
+
+#define WF_CHAN_FACTOR_2_4_G		4814	
+#define WF_CHAN_FACTOR_5_G		10000	
+#define WF_CHAN_FACTOR_4_G		8000	
+
+
+#define LOWER_20_SB(channel)	((channel > CH_10MHZ_APART) ? (channel - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel)	((channel < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+				(channel + CH_10MHZ_APART) : 0)
+#define CHSPEC_WLCBANDUNIT(chspec)	(CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel)	(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+				WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+				WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel)	((channel < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+					(channel + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+					((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+					WL_CHANSPEC_BAND_5G))
+#define CHSPEC_CHANNEL(chspec)	((uint8)(chspec & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_BAND(chspec)	(chspec & WL_CHANSPEC_BAND_MASK)
+
+#ifdef WL20MHZ_ONLY
+
+#define CHSPEC_CTL_SB(chspec)	WL_CHANSPEC_CTL_SB_NONE
+#define CHSPEC_BW(chspec)	WL_CHANSPEC_BW_20
+#define CHSPEC_IS10(chspec)	0
+#define CHSPEC_IS20(chspec)	1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	0
+#endif
+
+#else 
+
+#define CHSPEC_CTL_SB(chspec)	(chspec & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec)	(chspec & WL_CHANSPEC_BW_MASK)
+#define CHSPEC_IS10(chspec)	((chspec & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec)	((chspec & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+
+#endif 
+
+#define CHSPEC_IS5G(chspec)	((chspec & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec)	((chspec & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_NONE(chspec)	((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
+#define CHSPEC_SB_UPPER(chspec)	((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
+#define CHSPEC_SB_LOWER(chspec)	((chspec & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
+#define CHSPEC_CTL_CHAN(chspec)  ((CHSPEC_SB_LOWER(chspec)) ? \
+				  (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
+				  (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
+
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G((chspec))? WLC_BAND_5G: WLC_BAND_2G)
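The macros above pack the channel number, control sideband, bandwidth and band into a single 16-bit chanspec. A worked example for 2.4 GHz channel 6 at 20 MHz, derived only from the defines above (illustration only, not part of this header):

static chanspec_t chanspec_sketch(void)
{
	/* 0x0006 | WL_CHANSPEC_BW_20 (0x0800) | WL_CHANSPEC_CTL_SB_NONE (0x0300)
	 * | WL_CHANSPEC_BAND_2G (0x2000)  ==  0x2b06
	 */
	chanspec_t cs = CH20MHZ_CHSPEC(6);

	/* CHSPEC_CHANNEL(cs) == 6; CHSPEC_IS20(cs) and CHSPEC_IS2G(cs) are nonzero */
	return cs;
}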
+
+#define CHANSPEC_STR_LEN    8
+
+
+#define WLC_MAXRATE	108	
+#define WLC_RATE_1M	2	
+#define WLC_RATE_2M	4	
+#define WLC_RATE_5M5	11	
+#define WLC_RATE_11M	22	
+#define WLC_RATE_6M	12	
+#define WLC_RATE_9M	18	
+#define WLC_RATE_12M	24	
+#define WLC_RATE_18M	36	
+#define WLC_RATE_24M	48	
+#define WLC_RATE_36M	72	
+#define WLC_RATE_48M	96	
+#define WLC_RATE_54M	108	
+
+#define WLC_2G_25MHZ_OFFSET		5	
+
+
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+
+extern chanspec_t wf_chspec_aton(char *a);
+
+
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+
+extern int wf_channel2mhz(uint channel, uint start_factor);
+
+#endif	
diff --git a/drivers/net/wireless/bcm4329/include/dhdioctl.h b/drivers/net/wireless/bcm4329/include/dhdioctl.h
new file mode 100644
index 0000000..980a143
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/dhdioctl.h
@@ -0,0 +1,123 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h,v 13.7.8.1.4.1.16.5 2010/05/21 21:49:38 Exp $
+ */
+
+#ifndef _dhdioctl_h_
+#define	_dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	bool set;	/* get or set request (optional) */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+	uint driver;	/* to identify target driver */
+} dhd_ioctl_t;
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC		0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION	1
+
+#define	DHD_IOCTL_MAXLEN	8192		/* max length ioctl buffer required */
+#define	DHD_IOCTL_SMLEN		256		/* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC				0
+#define DHD_GET_VERSION				1
+#define DHD_GET_VAR				2
+#define DHD_SET_VAR				3
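The dhd_ioctl_t structure and the DHD_GET_VAR/DHD_SET_VAR commands above carry iovar requests between user space and the driver. A minimal user-space sketch of preparing a GET request follows; the "version" iovar name is a hypothetical example, and the expectation that the driver returns the result in the same buffer is how iovar GETs conventionally behave rather than something stated in this header:

#include <string.h>

static void prep_get_var(dhd_ioctl_t *ioc, char *buf, uint buflen)
{
	/* The iovar name goes at the start of the buffer; on a successful GET the
	 * driver is expected to write the result back into the same buffer.
	 */
	memset(ioc, 0, sizeof(*ioc));
	strncpy(buf, "version", buflen);	/* hypothetical iovar name */
	ioc->cmd = DHD_GET_VAR;
	ioc->buf = buf;
	ioc->len = buflen;
	ioc->set = FALSE;
	ioc->driver = DHD_IOCTL_MAGIC;		/* identifies the target driver */
}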
+
+/* message levels */
+#define DHD_ERROR_VAL	0x0001
+#define DHD_TRACE_VAL	0x0002
+#define DHD_INFO_VAL	0x0004
+#define DHD_DATA_VAL	0x0008
+#define DHD_CTL_VAL	0x0010
+#define DHD_TIMER_VAL	0x0020
+#define DHD_HDRS_VAL	0x0040
+#define DHD_BYTES_VAL	0x0080
+#define DHD_INTR_VAL	0x0100
+#define DHD_LOG_VAL	0x0200
+#define DHD_GLOM_VAL	0x0400
+#define DHD_EVENT_VAL	0x0800
+#define DHD_BTA_VAL	0x1000
+#define DHD_ISCAN_VAL 0x2000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+	uint version;		/* To allow structure change tracking */
+	uint freq;		/* Max ticks between tx/rx attempts */
+	uint count;		/* Test packets to send/rcv each attempt */
+	uint print;		/* Print counts every <print> attempts */
+	uint total;		/* Total packets (or bursts) */
+	uint minlen;		/* Minimum length of packets to send */
+	uint maxlen;		/* Maximum length of packets to send */
+	uint numsent;		/* Count of test packets sent */
+	uint numrcvd;		/* Count of test packets received */
+	uint numfail;		/* Count of test send failures */
+	uint mode;		/* Test mode (type of test packets) */
+	uint stop;		/* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO		1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 	2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST	3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV		4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE	(-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE	0	/* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP   (-1)	/* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/epivers.h b/drivers/net/wireless/bcm4329/include/epivers.h
new file mode 100644
index 0000000..cd66a95
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/epivers.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.25 2005/10/28 18:35:33 Exp $
+ *
+*/
+
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define	EPI_MAJOR_VERSION	4
+
+#define	EPI_MINOR_VERSION	218
+
+#define	EPI_RC_NUMBER		248
+
+#define	EPI_INCREMENTAL_NUMBER	23
+
+#define	EPI_BUILD_NUMBER	0
+
+#define	EPI_VERSION		4, 218, 248, 23
+
+#define	EPI_VERSION_NUM		0x04daf817
+
+
+#define	EPI_VERSION_STR		"4.218.248.23"
+#define	EPI_ROUTER_VERSION_STR	"4.219.248.23"
+
+#endif 
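EPI_VERSION_NUM above packs one version component per byte. A small consistency check derived only from the values in this header (illustration only, not part of the original file):

static int epi_version_num_consistent(void)
{
	/* 0x04daf817 == (4 << 24) | (218 << 16) | (248 << 8) | 23, i.e. "4.218.248.23" */
	return EPI_VERSION_NUM == ((EPI_MAJOR_VERSION << 24) |
	                           (EPI_MINOR_VERSION << 16) |
	                           (EPI_RC_NUMBER << 8) |
	                           EPI_INCREMENTAL_NUMBER);
}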
diff --git a/drivers/net/wireless/bcm4329/include/hndpmu.h b/drivers/net/wireless/bcm4329/include/hndpmu.h
new file mode 100644
index 0000000..e829b3d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndpmu.h
@@ -0,0 +1,34 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h,v 13.14.4.3.4.3.8.7 2010/04/09 13:20:51 Exp $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h b/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h
new file mode 100644
index 0000000..ca3281b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndrte_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HNDRTE arm trap handling.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_armtrap.h,v 13.3.196.2 2010/07/15 19:06:11 Exp $
+ */
+
+#ifndef	_hndrte_armtrap_h
+#define	_hndrte_armtrap_h
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define	TRAP_STRIDE	4
+#define FIRST_TRAP	TR_RST
+#define LAST_TRAP	(TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define	MAX_TRAP_TYPE	(TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define	MAX_TRAP_TYPE	(TR_ISR + ARMCM3_NUMINTS)
+#endif	/* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define	TR_TYPE		0x00
+#define	TR_EPC		0x04
+#define	TR_CPSR		0x08
+#define	TR_SPSR		0x0c
+#define	TR_REGS		0x10
+#define	TR_REG(n)	(TR_REGS + (n) * 4)
+#define	TR_SP		TR_REG(13)
+#define	TR_LR		TR_REG(14)
+#define	TR_PC		TR_REG(15)
+
+#define	TRAP_T_SIZE	80
+
+#ifndef	_LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+	uint32		type;
+	uint32		epc;
+	uint32		cpsr;
+	uint32		spsr;
+	uint32		r0;
+	uint32		r1;
+	uint32		r2;
+	uint32		r3;
+	uint32		r4;
+	uint32		r5;
+	uint32		r6;
+	uint32		r7;
+	uint32		r8;
+	uint32		r9;
+	uint32		r10;
+	uint32		r11;
+	uint32		r12;
+	uint32		r13;
+	uint32		r14;
+	uint32		pc;
+} trap_t;
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+#endif	/* _hndrte_armtrap_h */
diff --git a/drivers/net/wireless/bcm4329/include/hndrte_cons.h b/drivers/net/wireless/bcm4329/include/hndrte_cons.h
new file mode 100644
index 0000000..a424174
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndrte_cons.h
@@ -0,0 +1,63 @@
+/*
+ * Console support for hndrte.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_cons.h,v 13.1.2.4 2010/07/15 19:06:11 Exp $
+ */
+
+#include <typedefs.h>
+
+#define CBUF_LEN	(128)
+
+#define LOG_BUF_LEN	1024
+
+typedef struct {
+	uint32		buf;		/* Can't be pointer on (64-bit) hosts */
+	uint		buf_size;
+	uint		idx;
+	char		*_buf_compat;	/* Redundant pointer for backward compat. */
+} hndrte_log_t;
+
+typedef struct {
+	/* Virtual UART
+	 *   When there is no UART (e.g. Quickturn), the host should write a complete
+	 *   input line directly into cbuf and then write the length into vcons_in.
+	 *   This may also be used when there is a real UART (at risk of conflicting with
+	 *   the real UART).  vcons_out is currently unused.
+	 */
+	volatile uint	vcons_in;
+	volatile uint	vcons_out;
+
+	/* Output (logging) buffer
+	 *   Console output is written to a ring buffer log_buf at index log_idx.
+	 *   The host may read the output when it sees log_idx advance.
+	 *   Output will be lost if the output wraps around faster than the host polls.
+	 */
+	hndrte_log_t	log;
+
+	/* Console input line buffer
+	 *   Characters are read one at a time into cbuf until <CR> is received, then
+	 *   the buffer is processed as a command line.  Also used for virtual UART.
+	 */
+	uint		cbuf_idx;
+	char		cbuf[CBUF_LEN];
+} hndrte_cons_t;
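The struct comments above describe the console protocol in prose only. A minimal host-side sketch of draining the output ring is shown below; it assumes hndrte_cons_t has already been copied into host memory, and cons_read_byte() is a hypothetical helper for reading bytes out of the device-resident log buffer (neither assumption comes from this header):

extern uint8 cons_read_byte(uint32 dev_addr);	/* hypothetical device-memory reader */

static uint drain_console_log(hndrte_cons_t *cons, uint last_idx, void (*emit)(char c))
{
	uint idx = cons->log.idx;	/* snapshot of the device write index */

	while (last_idx != idx) {
		emit((char)cons_read_byte(cons->log.buf + last_idx));
		last_idx = (last_idx + 1) % cons->log.buf_size;
	}
	return last_idx;	/* caller keeps this for the next poll */
}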
diff --git a/drivers/net/wireless/bcm4329/include/hndsoc.h b/drivers/net/wireless/bcm4329/include/hndsoc.h
new file mode 100644
index 0000000..3542417
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/hndsoc.h
@@ -0,0 +1,195 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h,v 13.3.10.3 2008/08/06 03:43:25 Exp $
+ */
+
+#ifndef	_HNDSOC_H
+#define	_HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE		0x00000000	/* Physical SDRAM */
+#define SI_PCI_MEM		0x08000000	/* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ		(64 * 1024 * 1024)
+#define SI_PCI_CFG		0x0c000000	/* Host Mode sb2pcitranslation1 (64 MB) */
+#define	SI_SDRAM_SWAPPED	0x10000000	/* Byteswapped Physical SDRAM */
+
+#define SI_ENUM_BASE    	0x18000000	/* Enumeration space base */
+#define SI_CORE_SIZE    	0x1000		/* each core gets 4Kbytes for registers */
+#ifndef SI_MAXCORES
+#define	SI_MAXCORES		16		/* Max cores (this is arbitrary, for software
+						 * convenience and could be changed if we
+						 * make any larger chips)
+						 */
+#endif
+
+#define	SI_FASTRAM		0x19000000	/* On-chip RAM on chips that also have DDR */
+
+#define	SI_FLASH2		0x1c000000	/* Flash Region 2 (region 1 shadowed here) */
+#define	SI_FLASH2_SZ		0x02000000	/* Size of Flash Region 2 */
+#define	SI_ARMCM3_ROM		0x1e000000	/* ARM Cortex-M3 ROM */
+#define	SI_FLASH1		0x1fc00000	/* MIPS Flash Region 1 */
+#define	SI_FLASH1_SZ		0x00400000	/* MIPS Size of Flash Region 1 */
+#define	SI_ARM7S_ROM		0x20000000	/* ARM7TDMI-S ROM */
+#define	SI_ARMCM3_SRAM2		0x60000000	/* ARM Cortex-M3 SRAM Region 2 */
+#define	SI_ARM7S_SRAM2		0x80000000	/* ARM7TDMI-S SRAM Region 2 */
+#define	SI_ARM_FLASH1		0xffff0000	/* ARM Flash Region 1 */
+#define	SI_ARM_FLASH1_SZ	0x00010000	/* ARM Size of Flash Region 1 */
+
+#define SI_PCI_DMA		0x40000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2		0x80000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ		0x40000000	/* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32		0x00000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), low 32 bits
+						 */
+#define SI_PCIE_DMA_H32		0x80000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+
+/* core codes */
+#define	NODEV_CORE_ID		0x700		/* Invalid coreid */
+#define	CC_CORE_ID		0x800		/* chipcommon core */
+#define	ILINE20_CORE_ID		0x801		/* iline20 core */
+#define	SRAM_CORE_ID		0x802		/* sram core */
+#define	SDRAM_CORE_ID		0x803		/* sdram core */
+#define	PCI_CORE_ID		0x804		/* pci core */
+#define	MIPS_CORE_ID		0x805		/* mips core */
+#define	ENET_CORE_ID		0x806		/* enet mac core */
+#define	CODEC_CORE_ID		0x807		/* v90 codec core */
+#define	USB_CORE_ID		0x808		/* usb 1.1 host/device core */
+#define	ADSL_CORE_ID		0x809		/* ADSL core */
+#define	ILINE100_CORE_ID	0x80a		/* iline100 core */
+#define	IPSEC_CORE_ID		0x80b		/* ipsec core */
+#define	UTOPIA_CORE_ID		0x80c		/* utopia core */
+#define	PCMCIA_CORE_ID		0x80d		/* pcmcia core */
+#define	SOCRAM_CORE_ID		0x80e		/* internal memory core */
+#define	MEMC_CORE_ID		0x80f		/* memc sdram core */
+#define	OFDM_CORE_ID		0x810		/* OFDM phy core */
+#define	EXTIF_CORE_ID		0x811		/* external interface core */
+#define	D11_CORE_ID		0x812		/* 802.11 MAC core */
+#define	APHY_CORE_ID		0x813		/* 802.11a phy core */
+#define	BPHY_CORE_ID		0x814		/* 802.11b phy core */
+#define	GPHY_CORE_ID		0x815		/* 802.11g phy core */
+#define	MIPS33_CORE_ID		0x816		/* mips3302 core */
+#define	USB11H_CORE_ID		0x817		/* usb 1.1 host core */
+#define	USB11D_CORE_ID		0x818		/* usb 1.1 device core */
+#define	USB20H_CORE_ID		0x819		/* usb 2.0 host core */
+#define	USB20D_CORE_ID		0x81a		/* usb 2.0 device core */
+#define	SDIOH_CORE_ID		0x81b		/* sdio host core */
+#define	ROBO_CORE_ID		0x81c		/* roboswitch core */
+#define	ATA100_CORE_ID		0x81d		/* parallel ATA core */
+#define	SATAXOR_CORE_ID		0x81e		/* serial ATA & XOR DMA core */
+#define	GIGETH_CORE_ID		0x81f		/* gigabit ethernet core */
+#define	PCIE_CORE_ID		0x820		/* pci express core */
+#define	NPHY_CORE_ID		0x821		/* 802.11n 2x2 phy core */
+#define	SRAMC_CORE_ID		0x822		/* SRAM controller core */
+#define	MINIMAC_CORE_ID		0x823		/* MINI MAC/phy core */
+#define	ARM11_CORE_ID		0x824		/* ARM 1176 core */
+#define	ARM7S_CORE_ID		0x825		/* ARM7tdmi-s core */
+#define	LPPHY_CORE_ID		0x826		/* 802.11a/b/g phy core */
+#define	PMU_CORE_ID		0x827		/* PMU core */
+#define	SSNPHY_CORE_ID		0x828		/* 802.11n single-stream phy core */
+#define	SDIOD_CORE_ID		0x829		/* SDIO device core */
+#define	ARMCM3_CORE_ID		0x82a		/* ARM Cortex M3 core */
+#define	QNPHY_CORE_ID		0x82b		/* 802.11n 4x4 phy core */
+#define	MIPS74K_CORE_ID		0x82c		/* mips 74k core */
+#define	GMAC_CORE_ID		0x82d		/* Gigabit MAC core */
+#define	DMEMC_CORE_ID		0x82e		/* DDR1/2 memory controller core */
+#define	PCIERC_CORE_ID		0x82f		/* PCIE Root Complex core */
+#define	OCP_CORE_ID		0x830		/* OCP2OCP bridge core */
+#define	SC_CORE_ID		0x831		/* shared common core */
+#define	AHB_CORE_ID		0x832		/* OCP2AHB bridge core */
+#define	SPIH_CORE_ID		0x833		/* SPI host core */
+#define	I2S_CORE_ID		0x834		/* I2S core */
+#define OOB_ROUTER_CORE_ID	0x367		/* OOB router core ID */
+#define	DEF_AI_COMP		0xfff		/* Default component, in ai chips it maps all
+						 * unused address ranges
+						 */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define	SI_CC_IDX		0
+
+/* SOC Interconnect types (aka chip types) */
+#define	SOCI_SB			0
+#define	SOCI_AI			1
+
+/* Common core control flags */
+#define	SICF_BIST_EN		0x8000
+#define	SICF_PME_EN		0x4000
+#define	SICF_CORE_BITS		0x3ffc
+#define	SICF_FGC		0x0002
+#define	SICF_CLOCK_EN		0x0001
+
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST		0x1e0		/* clock control and status */
+
+/* clk_ctl_st register */
+#define	CCS_FORCEALP		0x00000001	/* force ALP request */
+#define	CCS_FORCEHT		0x00000002	/* force HT request */
+#define	CCS_FORCEILP		0x00000004	/* force ILP request */
+#define	CCS_ALPAREQ		0x00000008	/* ALP Avail Request */
+#define	CCS_HTAREQ		0x00000010	/* HT Avail Request */
+#define	CCS_FORCEHWREQOFF	0x00000020	/* Force HW Clock Request Off */
+#define	CCS_ALPAVAIL		0x00010000	/* ALP is available */
+#define	CCS_HTAVAIL		0x00020000	/* HT is available */
+#define	CCS0_HTAVAIL		0x00010000	/* HT avail in chipc and pcmcia on 4328a0 */
+#define	CCS0_ALPAVAIL		0x00020000	/* ALP avail in chipc and pcmcia on 4328a0 */
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size  */
+#define	BISZ_OFFSET		0x3e0		/* At this offset into the binary */
+#define	BISZ_MAGIC		0x4249535a	/* Marked with this value: 'BISZ' */
+#define	BISZ_MAGIC_IDX		0		/* Word 0: magic */
+#define	BISZ_TXTST_IDX		1		/*	1: text start */
+#define	BISZ_TXTEND_IDX		2		/*	2: text end */
+#define	BISZ_DATAST_IDX		3		/*	3: data start */
+#define	BISZ_DATAEND_IDX	4		/*	4: data end */
+#define	BISZ_BSSST_IDX		5		/*	5: bss start */
+#define	BISZ_BSSEND_IDX		6		/*	6: bss end */
+#define BISZ_SIZE		7		/* descriptor size in 32-bit integers */
+
+#endif /* _HNDSOC_H */
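The BISZ descriptor documented above is simply BISZ_SIZE consecutive 32-bit words at BISZ_OFFSET into the image. A minimal sketch of validating it from a host-resident copy of the firmware; the image pointer is hypothetical and matching host/image byte order is assumed:

static int image_has_bisz(const uint32 *image)	/* image base, 32-bit aligned */
{
	const uint32 *bisz = image + (BISZ_OFFSET / sizeof(uint32));

	if (bisz[BISZ_MAGIC_IDX] != BISZ_MAGIC)
		return 0;
	/* bisz[BISZ_TXTST_IDX] .. bisz[BISZ_BSSEND_IDX] then give the section bounds */
	return 1;
}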
diff --git a/drivers/net/wireless/bcm4329/include/linux_osl.h b/drivers/net/wireless/bcm4329/include/linux_osl.h
new file mode 100644
index 0000000..b059c2a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/linux_osl.h
@@ -0,0 +1,322 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h,v 13.131.30.8 2010/04/26 05:42:18 Exp $
+ */
+
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+
+
+#include <linuxver.h>
+
+
+#ifdef __GNUC__
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 30100
+#define	ASSERT(exp)		do {} while (0)
+#else
+
+#define	ASSERT(exp)
+#endif 
+#endif 
+
+
+#define	OSL_DELAY(usec)		osl_delay(usec)
+extern void osl_delay(uint usec);
+
+
+
+#define	OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define	OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+
+#define	OSL_PCI_READ_CONFIG(osh, offset, size) \
+	osl_pci_read_config((osh), (offset), (size))
+#define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+	osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+
+#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+
+
+typedef struct {
+	bool pkttag;
+	uint pktalloced; 	
+	bool mmbus;		
+	pktfree_cb_fn_t tx_fn;  
+	void *tx_ctx;		
+} osl_pubinfo_t;
+
+
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+extern void osl_detach(osl_t *osh);
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
+	do { \
+	   ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
+	   ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
+	} while (0)
+
+
+#define BUS_SWAP32(v)		(v)
+
+
+#define	MALLOC(osh, size)	osl_malloc((osh), (size))
+#define	MFREE(osh, addr, size)	osl_mfree((osh), (addr), (size))
+#define MALLOCED(osh)		osl_malloced((osh))
+
+
+#define	MALLOC_FAILED(osh)	osl_malloc_failed((osh))
+
+extern void *osl_malloc(osl_t *osh, uint size);
+extern void osl_mfree(osl_t *osh, void *addr, uint size);
+extern uint osl_malloced(osl_t *osh);
+extern uint osl_malloc_failed(osl_t *osh);
+
+
+#define	DMA_CONSISTENT_ALIGN	PAGE_SIZE
+#define	DMA_ALLOC_CONSISTENT(osh, size, pap, dmah, alignbits) \
+	osl_dma_alloc_consistent((osh), (size), (pap))
+#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa);
+
+
+#define	DMA_TX	1	
+#define	DMA_RX	2	
+
+
+#define	DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction))
+#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+	osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+
+#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
+
+
+#include <bcmsdh.h>
+#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v)))
+#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r))))
+
+#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+	mmap_op else bus_op
+#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+	mmap_op : bus_op
+
+
+
+
+#ifndef printf
+#define	printf(fmt, args...)	printk(fmt, ## args)
+#endif 
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+
+#ifndef IL_BIGENDIAN
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(uint8) ? readb((volatile uint8*)(r)) : \
+	sizeof(*(r)) == sizeof(uint16) ? readw((volatile uint16*)(r)) : \
+	readl((volatile uint32*)(r)), OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh,  \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+#else	
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)((uintptr)(r)^3)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)((uintptr)(r)^2)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+			} \
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh,  \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), \
+					(volatile uint8*)((uintptr)(r)^3)); break; \
+			case sizeof(uint16):	writew((uint16)(v), \
+					(volatile uint16*)((uintptr)(r)^2)); break; \
+			case sizeof(uint32):	writel((uint32)(v), \
+					(volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+#endif 
+
+#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+
+
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+
+#define OSL_UNCACHED(va)	((void*)va)
+
+
+#if defined(__i386__)
+#define	OSL_GETCYCLES(x)	rdtscl((x))
+#else
+#define OSL_GETCYCLES(x)	((x) = 0)
+#endif 
+
+
+#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })
+
+
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define	REG_MAP(pa, size)	ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size)       (void *)(0)
+#endif 
+#define	REG_UNMAP(va)		iounmap((va))
+
+
+#define	R_SM(r)			*(r)
+#define	W_SM(r, v)		(*(r) = (v))
+#define	BZERO_SM(r, len)	memset((r), '\0', (len))
+
+
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
+#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
+#ifdef DHD_USE_STATIC_BUF
+#define	PKTGET_STATIC(osh, len, send)		osl_pktget_static((osh), (len))
+#define	PKTFREE_STATIC(osh, skb, send)		osl_pktfree_static((osh), (skb), (send))
+#endif 
+#define	PKTDATA(osh, skb)		(((struct sk_buff*)(skb))->data)
+#define	PKTLEN(osh, skb)		(((struct sk_buff*)(skb))->len)
+#define PKTHEADROOM(osh, skb)		(PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail))
+#define	PKTNEXT(osh, skb)		(((struct sk_buff*)(skb))->next)
+#define	PKTSETNEXT(osh, skb, x)		(((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
+#define	PKTSETLEN(osh, skb, len)	__skb_trim((struct sk_buff*)(skb), (len))
+#define	PKTPUSH(osh, skb, bytes)	skb_push((struct sk_buff*)(skb), (bytes))
+#define	PKTPULL(osh, skb, bytes)	skb_pull((struct sk_buff*)(skb), (bytes))
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb))
+#define	PKTTAG(skb)			((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTALLOCED(osh)			((osl_pubinfo_t *)(osh))->pktalloced
+#define PKTSETPOOL(osh, skb, x, y)	do {} while (0)
+#define PKTPOOL(osh, skb)		FALSE
+#define PKTPOOLLEN(osh, pktp)		(0)
+#define PKTPOOLAVAIL(osh, pktp)		(0)
+#define PKTPOOLADD(osh, pktp, p)	BCME_ERROR
+#define PKTPOOLGET(osh, pktp)		NULL
+#define PKTLIST_DUMP(osh, buf)
+
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+
+
+
+static INLINE void *
+osl_pkt_frmnative(osl_pubinfo_t *osh, struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+
+	if (osh->pkttag)
+		bzero((void*)skb->cb, OSL_PKTTAG_SZ);
+
+	
+	for (nskb = skb; nskb; nskb = nskb->next) {
+		osh->pktalloced++;
+	}
+
+	return (void *)skb;
+}
+#define PKTFRMNATIVE(osh, skb)	osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb))
+
+
+static INLINE struct sk_buff *
+osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+
+	if (osh->pkttag)
+		bzero(((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ);
+
+	
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		osh->pktalloced--;
+	}
+
+	return (struct sk_buff *)pkt;
+}
+#define PKTTONATIVE(osh, pkt)		osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt))
+
+#define	PKTLINK(skb)			(((struct sk_buff*)(skb))->prev)
+#define	PKTSETLINK(skb, x)		(((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define	PKTPRIO(skb)			(((struct sk_buff*)(skb))->priority)
+#define	PKTSETPRIO(skb, x)		(((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb)		(((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff*)(skb))->ip_summed = \
+						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+#define PKTSHARED(skb)                  (((struct sk_buff*)(skb))->cloned)
+
+
+#define OSL_ERROR(bcmerror)	osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+
+#define	PKTBUFSZ	2048   
+
+
+#define OSL_SYSUPTIME()		((uint32)jiffies * (1000 / HZ))
+#endif	
diff --git a/drivers/net/wireless/bcm4329/include/linuxver.h b/drivers/net/wireless/bcm4329/include/linuxver.h
new file mode 100644
index 0000000..6ed2265
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/linuxver.h
@@ -0,0 +1,447 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h,v 13.38.8.1.8.6 2010/04/29 05:00:46 Exp $
+ */
+
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#include <linux/autoconf.h>
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif	
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif 
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif	
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define	MY_INIT_WORK(_work, _func, _data)	INIT_WORK(_work, _func)
+#else
+#define	MY_INIT_WORK(_work, _func, _data)	INIT_WORK(_work, _func, _data)
+typedef void (*work_func_t)(void *work);
+#endif	
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif	
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED	SA_SHIRQ
+#endif 
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef	CONFIG_NET_RADIO
+#define	CONFIG_WIRELESS_EXT
+#endif
+#endif	
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#ifndef SANDGATE2G
+#define MOD_INC_USE_COUNT
+#endif 
+#endif 
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#define __devinit	__init
+#endif
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x)	x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev)		(dev)->sysdata
+#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
+
+
+
+struct pci_device_id {
+	unsigned int vendor, device;		
+	unsigned int subvendor, subdevice;	
+	unsigned int class, class_mask;		
+	unsigned long driver_data;		
+};
+
+struct pci_driver {
+	struct list_head node;
+	char *name;
+	const struct pci_device_id *id_table;	
+	int (*probe)(struct pci_dev *dev,
+	             const struct pci_device_id *id); 
+	void (*remove)(struct pci_dev *dev);	
+	void (*suspend)(struct pci_dev *dev);	
+	void (*resume)(struct pci_dev *dev);	
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif 
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x)	__initcall(x);
+#define module_exit(x)	__exitcall(x);
+#endif
+#endif	
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+
+
+#ifndef PCI_DMA_TODEVICE
+#define	PCI_DMA_TODEVICE	1
+#define	PCI_DMA_FROMDEVICE	2
+#endif
+
+typedef u32 dma_addr_t;
+
+
+static inline int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                                         dma_addr_t *dma_handle)
+{
+	void *ret;
+	int gfp = GFP_ATOMIC | GFP_DMA;
+
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_bus(ret);
+	}
+	return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                                       void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif 
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
+#define netif_down(dev)			do { (dev)->start = 0; } while (0)
+
+
+#ifndef _COMPAT_NETDEVICE_H
+
+
+
+#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+}
+
+#define netif_queue_stopped(dev)	(dev)->tbusy
+#define netif_running(dev)		(dev)->start
+
+#endif 
+
+#define netif_device_attach(dev)	netif_start_queue(dev)
+#define netif_device_detach(dev)	netif_stop_queue(dev)
+
+
+#define tasklet_struct				tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+	queue_task(tasklet, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+                                void (*func)(unsigned long),
+                                unsigned long data)
+{
+	tasklet->next = NULL;
+	tasklet->sync = 0;
+	tasklet->routine = (void (*)(void *))func;
+	tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet)	{ do {} while (0); }
+
+
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif 
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+
+#define PREPARE_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		(_tq)->routine = _routine;			\
+		(_tq)->data = _data;				\
+	} while (0)
+
+
+#define INIT_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		INIT_LIST_HEAD(&(_tq)->list);			\
+		(_tq)->sync = 0;				\
+		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
+	} while (0)
+
+#endif	
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+
+
+
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_read_config_dword(dev, i * 4, &buffer[i]);
+	}
+	return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_write_config_dword(dev, i * 4, buffer[i]);
+	}
+	
+	else {
+		for (i = 0; i < 6; i ++)
+			pci_write_config_dword(dev,
+			                       PCI_BASE_ADDRESS_0 + (i * 4),
+			                       pci_resource_start(dev, i));
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	}
+	return 0;
+}
+
+#endif 
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT		do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
+#endif
+#else 
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT			do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT			do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#endif 
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev)		kfree(dev)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#define af_packet_priv			data
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW	CHECKSUM_PARTIAL
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#define KILL_PROC(pid, sig) \
+{ \
+	struct task_struct *tsk; \
+	tsk = pid_task(find_vpid(pid), PIDTYPE_PID); \
+	if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+	kill_proc(pid, sig, 1); \
+}
+#endif 
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif
+
+#endif 
diff --git a/drivers/net/wireless/bcm4329/include/miniopt.h b/drivers/net/wireless/bcm4329/include/miniopt.h
new file mode 100644
index 0000000..3667fb1
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h,v 1.1.6.2 2009/01/14 23:52:48 Exp $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max option key length */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;		/* name for prompt in error strings */
+	const char* flags;		/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int  val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif  /* MINI_OPT_H  */
diff --git a/drivers/net/wireless/bcm4329/include/msgtrace.h b/drivers/net/wireless/bcm4329/include/msgtrace.h
new file mode 100644
index 0000000..1479086
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/msgtrace.h
@@ -0,0 +1,72 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h,v 1.1.2.4 2009/01/27 04:09:40 Exp $
+ */
+
+#ifndef	_MSGTRACE_H
+#define	_MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8	version;
+	uint8   spare;
+	uint16	len;	/* Len of the trace */
+	uint32	seqnum;	/* Sequence number of message. Useful if the message has been lost
+			 * because of DMA error or a bus reset (ex: SDIO Func2)
+			 */
+	uint32  discarded_bytes;  /* Number of discarded bytes because of trace overflow  */
+	uint32  discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN 	sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces when sending a trace message, which would otherwise cause
+ * endless traces. This flag must be set to TRUE in any hbus trace path; it is reset in the
+ * function msgtrace_put. This prevents endless traces at the cost of a possible loss of traces,
+ * limited to bus device code. It is recommended to set this flag in the SD_TRACE macro but not in
+ * SD_ERROR, to avoid missing hbus error traces; hbus error traces should not generate endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+                                     uint16 hdrlen, uint8 *buf, uint16 buflen);
+
+extern void msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
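A sketch of how the hooks above are typically wired up (illustrative only, not part of the patch): the bus layer supplies a send routine matching msgtrace_func_send_t, and the two opaque handles given to msgtrace_init() are passed back verbatim on every send. The bus_send_buf() call mentioned in the comment is a hypothetical placeholder for the real bus transmit path.

/* Hypothetical bus transmit hook, used only for illustration */
static void example_trace_send(void *hdl1, void *hdl2, uint8 *hdr,
                               uint16 hdrlen, uint8 *buf, uint16 buflen)
{
	/* forward the msgtrace header followed by the trace payload over HBUS,
	 * e.g. bus_send_buf(hdl1, hdl2, hdr, hdrlen, buf, buflen);
	 */
}

static void example_attach_msgtrace(void *bus, void *dev)
{
	/* bus and dev are returned untouched as hdl1/hdl2 in example_trace_send() */
	msgtrace_init(bus, dev, example_trace_send);
}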
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcm4329/include/osl.h b/drivers/net/wireless/bcm4329/include/osl.h
new file mode 100644
index 0000000..5599e53
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/osl.h
@@ -0,0 +1,55 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: osl.h,v 13.37.32.1 2008/11/20 00:51:15 Exp $
+ */
+
+
+#ifndef _osl_h_
+#define _osl_h_
+
+
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ	32 
+
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+#include <linux_osl.h>
+
+
+
+
+#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif   
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif   
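The three macros above express read-modify-write register accesses in terms of the R_REG()/W_REG() primitives supplied by <linux_osl.h>. A small sketch of their intended use follows (illustrative only; the register pointer type and the way the osl_t handle is obtained are assumptions).

static void example_update_control(osl_t *osh, volatile uint32 *ctrl)
{
	OR_REG(osh, ctrl, 0x3);			/* set bits 1:0, leave the rest untouched */
	AND_REG(osh, ctrl, ~0x8);		/* clear bit 3 */
	SET_REG(osh, ctrl, 0xff, 0x12);		/* replace the low byte with 0x12 */
}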
+
+
+#endif	
diff --git a/drivers/net/wireless/bcm4329/include/packed_section_end.h b/drivers/net/wireless/bcm4329/include/packed_section_end.h
new file mode 100644
index 0000000..5b61c18
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/packed_section_end.h
@@ -0,0 +1,54 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h,v 1.1.6.3 2008/12/10 00:27:54 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+	#undef BWL_PACKED_SECTION
+#else
+	#error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+
+#undef	BWL_PRE_PACKED_STRUCT
+#undef	BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcm4329/include/packed_section_start.h b/drivers/net/wireless/bcm4329/include/packed_section_start.h
new file mode 100644
index 0000000..cb93aa6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/packed_section_start.h
@@ -0,0 +1,61 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h,v 1.1.6.3 2008/12/10 00:27:54 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+	#error "BWL_PACKED_SECTION is already defined!"
+#else
+	#define BWL_PACKED_SECTION
+#endif
+
+
+
+
+
+#if defined(__GNUC__)
+	#define	BWL_PRE_PACKED_STRUCT
+	#define	BWL_POST_PACKED_STRUCT	__attribute__((packed))
+#elif defined(__CC_ARM)
+	#define	BWL_PRE_PACKED_STRUCT	__packed
+	#define	BWL_POST_PACKED_STRUCT
+#else
+	#error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcm4329/include/pcicfg.h b/drivers/net/wireless/bcm4329/include/pcicfg.h
new file mode 100644
index 0000000..898962c
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/pcicfg.h
@@ -0,0 +1,52 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h,v 1.41.12.3 2008/06/26 22:49:41 Exp $
+ */
+
+
+#ifndef	_h_pcicfg_
+#define	_h_pcicfg_
+
+
+#define	PCI_CFG_VID		0
+#define	PCI_CFG_CMD		4
+#define	PCI_CFG_REV		8
+#define	PCI_CFG_BAR0		0x10
+#define	PCI_CFG_BAR1		0x14
+#define	PCI_BAR0_WIN		0x80	
+#define	PCI_INT_STATUS		0x90	
+#define	PCI_INT_MASK		0x94	
+
+#define PCIE_EXTCFG_OFFSET	0x100
+#define	PCI_BAR0_PCIREGS_OFFSET	(6 * 1024)	
+#define	PCI_BAR0_PCISBR_OFFSET	(4 * 1024)	
+
+#define PCI_BAR0_WINSZ		(16 * 1024)	
+
+
+#define	PCI_16KB0_PCIREGS_OFFSET (8 * 1024)	
+#define	PCI_16KB0_CCREGS_OFFSET	(12 * 1024)	
+#define PCI_16KBB0_WINSZ	(16 * 1024)	
+
+#endif	
diff --git a/drivers/net/wireless/bcm4329/include/proto/802.11.h b/drivers/net/wireless/bcm4329/include/proto/802.11.h
new file mode 100644
index 0000000..fd26317
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/802.11.h
@@ -0,0 +1,1433 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h,v 9.219.4.1.4.5.6.11 2010/02/09 13:23:26 Exp $
+ */
+
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US			1024	
+
+
+#define DOT11_A3_HDR_LEN		24	
+#define DOT11_A4_HDR_LEN		30	
+#define DOT11_MAC_HDR_LEN		DOT11_A3_HDR_LEN	
+#define DOT11_FCS_LEN			4	
+#define DOT11_ICV_LEN			4	
+#define DOT11_ICV_AES_LEN		8	
+#define DOT11_QOS_LEN			2	
+#define DOT11_HTC_LEN			4	
+
+#define DOT11_KEY_INDEX_SHIFT		6	
+#define DOT11_IV_LEN			4	
+#define DOT11_IV_TKIP_LEN		8	
+#define DOT11_IV_AES_OCB_LEN		4	
+#define DOT11_IV_AES_CCM_LEN		8	
+#define DOT11_IV_MAX_LEN		8	
+
+
+#define DOT11_MAX_MPDU_BODY_LEN		2304	
+
+#define DOT11_MAX_MPDU_LEN		(DOT11_A4_HDR_LEN + \
+					 DOT11_QOS_LEN + \
+					 DOT11_IV_AES_CCM_LEN + \
+					 DOT11_MAX_MPDU_BODY_LEN + \
+					 DOT11_ICV_LEN + \
+					 DOT11_FCS_LEN)	
+
+#define DOT11_MAX_SSID_LEN		32	
+
+
+#define DOT11_DEFAULT_RTS_LEN		2347	
+#define DOT11_MAX_RTS_LEN		2347	
+
+
+#define DOT11_MIN_FRAG_LEN		256	
+#define DOT11_MAX_FRAG_LEN		2346	
+#define DOT11_DEFAULT_FRAG_LEN		2346	
+
+
+#define DOT11_MIN_BEACON_PERIOD		1	
+#define DOT11_MAX_BEACON_PERIOD		0xFFFF	
+
+
+#define DOT11_MIN_DTIM_PERIOD		1	
+#define DOT11_MAX_DTIM_PERIOD		0xFF	
+
+
+#define DOT11_LLC_SNAP_HDR_LEN		8	
+#define DOT11_OUI_LEN			3	
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+	uint8	dsap;				
+	uint8	ssap;				
+	uint8	ctl;				
+	uint8	oui[DOT11_OUI_LEN];		
+	uint16	type;				
+} BWL_POST_PACKED_STRUCT;
+
+
+#define RFC1042_HDR_LEN	(ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)	
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	a1;		
+	struct ether_addr	a2;		
+	struct ether_addr	a3;		
+	uint16			seq;		
+	struct ether_addr	a4;		
+} BWL_POST_PACKED_STRUCT;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	ra;		
+	struct ether_addr	ta;		
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_RTS_LEN		16		
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	ra;		
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CTS_LEN		10		
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	ra;		
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_ACK_LEN		10		
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	bssid;		
+	struct ether_addr	ta;		
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_PS_POLL_LEN	16		
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	ra;		
+	struct ether_addr	bssid;		
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CS_END_LEN	16		
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1040];
+	struct dot11_action_wifi_vendor_specific* next_node;
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+#define DOT11_BA_CTL_POLICY_NORMAL	0x0000	
+#define DOT11_BA_CTL_POLICY_NOACK	0x0001	
+#define DOT11_BA_CTL_POLICY_MASK	0x0001	
+
+#define DOT11_BA_CTL_MTID		0x0002	
+#define DOT11_BA_CTL_COMPRESSED		0x0004	
+
+#define DOT11_BA_CTL_NUMMSDU_MASK	0x0FC0	
+#define DOT11_BA_CTL_NUMMSDU_SHIFT	6	
+
+#define DOT11_BA_CTL_TID_MASK		0xF000	
+#define DOT11_BA_CTL_TID_SHIFT		12	
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	ra;		
+	struct ether_addr	ta;		
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN	16		
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+	uint16			bar_control;	
+	uint16			seqnum;		
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN		4		
+
+#define DOT11_BA_BITMAP_LEN	128		
+#define DOT11_BA_CMP_BITMAP_LEN	8		
+
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+	uint16			ba_control;	
+	uint16			seqnum;		
+	uint8			bitmap[DOT11_BA_BITMAP_LEN];	
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN		4		
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+	uint16			fc;		
+	uint16			durid;		
+	struct ether_addr	da;		
+	struct ether_addr	sa;		
+	struct ether_addr	bssid;		
+	uint16			seq;		
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_MGMT_HDR_LEN	24		
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+	uint32			timestamp[2];
+	uint16			beacon_interval;
+	uint16			capability;
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_BCN_PRB_LEN	12		
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+	uint16			alg;		
+	uint16			seq;		
+	uint16			status;		
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN	6		
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+	uint16			capability;	
+	uint16			listen;		
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN	4	
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+	uint16			capability;	
+	uint16			listen;		
+	struct ether_addr	ap;		
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN	10	
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+	uint16			capability;	
+	uint16			status;		
+	uint16			aid;		
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN	6	
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+	uint8	category;
+	uint8	action;
+	uint8	token;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN	3	
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+	uint8	category;
+	uint8	action;
+	uint8	ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+	uint8	category;
+	uint8	action;
+	uint8	control;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE	1
+#define SM_PWRSAVE_MODE		2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+	uint8 id;
+	uint8 len;
+	uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+	uint8 min;
+	uint8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+	uint8 id;
+	uint8 len;
+	uint8 tx_pwr;
+	uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN	2 	
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+	uint8 id;
+	uint8 len;
+	uint8 first_channel;
+	uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+	uint8	id;		
+	uint8	len;		
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+	uint8	id;		
+	uint8	len;		
+	uint8	oui[3];		
+	uint8	type;           
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN	5
+#define BRCM_EXTCH_IE_TYPE	53	
+#define DOT11_EXTCH_IE_LEN	1
+#define DOT11_EXT_CH_MASK	0x03	
+#define DOT11_EXT_CH_UPPER	0x01	
+#define DOT11_EXT_CH_LOWER	0x03	
+#define DOT11_EXT_CH_NONE	0x00	
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+	uint8	category;
+	uint8	action;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN	2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+	uint8 id;	
+	uint8 len;	
+	uint8 mode;	
+	uint8 channel;	
+	uint8 count;	
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN	3	
+
+#define DOT11_CSA_MODE_ADVISORY		0	
+#define DOT11_CSA_MODE_NO_TX		1	
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+	uint8	category;
+	uint8	action;
+	dot11_chan_switch_ie_t chan_switch_ie;	
+	dot11_brcm_extch_ie_t extch_ie;		
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+	uint8 mode;	
+	uint8 reg;	
+	uint8 channel;	
+	uint8 count;	
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+	uint8 id;	
+	uint8 len;	
+	struct dot11_csa_body b; 
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	struct dot11_csa_body b;	
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN	4	
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	dot11_ext_csa_ie_t chan_switch_ie;	
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+	uint8	id;
+	uint8	len;
+	uint8	info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN	1	
+
+#define	DOT11_OBSS_COEX_INFO_REQ		0x01
+#define	DOT11_OBSS_COEX_40MHZ_INTOLERANT	0x02
+#define	DOT11_OBSS_COEX_20MHZ_WIDTH_REQ	0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+	uint8	id;
+	uint8	len;
+	uint8	regclass;
+	uint8	chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN	1	
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+	uint8 id;
+	uint8 len;
+	uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+#define DOT11_EXTCAP_LEN	1
+
+
+
+#define DOT11_MEASURE_TYPE_BASIC 	0	
+#define DOT11_MEASURE_TYPE_CCA 		1	
+#define DOT11_MEASURE_TYPE_RPI		2	
+
+
+#define DOT11_MEASURE_MODE_ENABLE 	(1<<1)	
+#define DOT11_MEASURE_MODE_REQUEST	(1<<2)	
+#define DOT11_MEASURE_MODE_REPORT 	(1<<3)	
+
+#define DOT11_MEASURE_MODE_LATE 	(1<<0)	
+#define DOT11_MEASURE_MODE_INCAPABLE	(1<<1)	
+#define DOT11_MEASURE_MODE_REFUSED	(1<<2)	
+
+#define DOT11_MEASURE_BASIC_MAP_BSS	((uint8)(1<<0))	
+#define DOT11_MEASURE_BASIC_MAP_OFDM	((uint8)(1<<1))	
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN	((uint8)(1<<2))	
+#define DOT11_MEASURE_BASIC_MAP_RADAR	((uint8)(1<<3))	
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS	((uint8)(1<<4))	
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14	
+
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3	
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	BWL_PRE_PACKED_STRUCT union
+	{
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 channel;
+			uint8 start_time[8];
+			uint16 duration;
+			uint8 map;
+		} BWL_POST_PACKED_STRUCT basic;
+		uint8 data[1];
+	} BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+
+#define DOT11_MNG_IE_MREP_FIXED_LEN	3	
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN	12	
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+	uint8 id;
+	uint8 len;
+	uint8 count;	
+	uint8 period;	
+	uint16 duration;	
+	uint16 offset;	
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+	uint8 channel;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+	uint8 id;
+	uint8 len;
+	uint8 eaddr[ETHER_ADDR_LEN];
+	uint8 interval;
+	chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+
+#define WME_OUI			"\x00\x50\xf2"	
+#define WME_VER			1	
+#define WME_TYPE		2	
+#define WME_SUBTYPE_IE		0	
+#define WME_SUBTYPE_PARAM_IE	1	
+#define WME_SUBTYPE_TSPEC	2	
+
+
+#define AC_BE			0	
+#define AC_BK			1	
+#define AC_VI			2	
+#define AC_VO			3	
+#define AC_COUNT		4	
+
+typedef uint8 ac_bitmap_t;	
+
+#define AC_BITMAP_NONE		0x0	
+#define AC_BITMAP_ALL		0xf	
+#define AC_BITMAP_TST(ab, ac)	(((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac)	(((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
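A short sketch of the bitmap helpers above (illustrative only, not part of the patch): an ac_bitmap_t carries one bit per WMM access category, and the macros test or modify it in place.

static int example_ac_bitmap(void)
{
	ac_bitmap_t ab = AC_BITMAP_NONE;

	AC_BITMAP_SET(ab, AC_VO);		/* mark voice as enabled */
	AC_BITMAP_SET(ab, AC_VI);
	AC_BITMAP_RESET(ab, AC_VI);		/* video cleared again */

	return AC_BITMAP_TST(ab, AC_VO);	/* non-zero: the AC_VO bit is still set */
}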
+
+
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7	
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+	uint8	ACI;
+	uint8	ECW;
+	uint16  TXOP;		
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN            24          
+
+
+#define WME_QI_AP_APSD_MASK         0x80        
+#define WME_QI_AP_APSD_SHIFT        7           
+#define WME_QI_AP_COUNT_MASK        0x0f        
+#define WME_QI_AP_COUNT_SHIFT       0           
+
+
+#define WME_QI_STA_MAXSPLEN_MASK    0x60        
+#define WME_QI_STA_MAXSPLEN_SHIFT   5           
+#define WME_QI_STA_APSD_ALL_MASK    0xf         
+#define WME_QI_STA_APSD_ALL_SHIFT   0           
+#define WME_QI_STA_APSD_BE_MASK     0x8         
+#define WME_QI_STA_APSD_BE_SHIFT    3           
+#define WME_QI_STA_APSD_BK_MASK     0x4         
+#define WME_QI_STA_APSD_BK_SHIFT    2           
+#define WME_QI_STA_APSD_VI_MASK     0x2         
+#define WME_QI_STA_APSD_VI_SHIFT    1           
+#define WME_QI_STA_APSD_VO_MASK     0x1         
+#define WME_QI_STA_APSD_VO_SHIFT    0           
+
+
+#define EDCF_AIFSN_MIN               1           
+#define EDCF_AIFSN_MAX               15          
+#define EDCF_AIFSN_MASK              0x0f        
+#define EDCF_ACM_MASK                0x10        
+#define EDCF_ACI_MASK                0x60        
+#define EDCF_ACI_SHIFT               5           
+#define EDCF_AIFSN_SHIFT             12          
+
+
+#define EDCF_ECW_MIN                 0           
+#define EDCF_ECW_MAX                 15          
+#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK             0x0f        
+#define EDCF_ECWMAX_MASK             0xf0        
+#define EDCF_ECWMAX_SHIFT            4           
+
+
+#define EDCF_TXOP_MIN                0           
+#define EDCF_TXOP_MAX                65535       
+#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
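A worked example of the two conversion macros above (illustrative only, not part of the patch): an ECW exponent of 4 gives a contention window of 2^4 - 1 = 15 slots, and a TXOP field of 0x5e (94) corresponds to 94 * 32 = 3008 microseconds, since TXOP is carried in 32 us units. The helper below decodes one edcf_acparam_t record; byte-order handling of the TXOP field is omitted for brevity.

static void example_decode_acparam(const edcf_acparam_t *acp,
                                   uint16 *cwmin, uint16 *cwmax, uint32 *txop_us)
{
	*cwmin = EDCF_ECW2CW(acp->ECW & EDCF_ECWMIN_MASK);			/* e.g. 4 -> 15 */
	*cwmax = EDCF_ECW2CW((acp->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT);
	*txop_us = EDCF_TXOP2USEC(acp->TXOP);					/* 32 us units */
}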
+
+
+#define NON_EDCF_AC_BE_ACI_STA          0x02
+
+
+#define EDCF_AC_BE_ACI_STA           0x03	
+#define EDCF_AC_BE_ECW_STA           0xA4	
+#define EDCF_AC_BE_TXOP_STA          0x0000	
+#define EDCF_AC_BK_ACI_STA           0x27	
+#define EDCF_AC_BK_ECW_STA           0xA4	
+#define EDCF_AC_BK_TXOP_STA          0x0000	
+#define EDCF_AC_VI_ACI_STA           0x42	
+#define EDCF_AC_VI_ECW_STA           0x43	
+#define EDCF_AC_VI_TXOP_STA          0x005e	
+#define EDCF_AC_VO_ACI_STA           0x62	
+#define EDCF_AC_VO_ECW_STA           0x32	
+#define EDCF_AC_VO_TXOP_STA          0x002f	
+
+
+#define EDCF_AC_BE_ACI_AP            0x03	
+#define EDCF_AC_BE_ECW_AP            0x64	
+#define EDCF_AC_BE_TXOP_AP           0x0000	
+#define EDCF_AC_BK_ACI_AP            0x27	
+#define EDCF_AC_BK_ECW_AP            0xA4	
+#define EDCF_AC_BK_TXOP_AP           0x0000	
+#define EDCF_AC_VI_ACI_AP            0x41	
+#define EDCF_AC_VI_ECW_AP            0x43	
+#define EDCF_AC_VI_TXOP_AP           0x005e	
+#define EDCF_AC_VO_ACI_AP            0x61	
+#define EDCF_AC_VO_ECW_AP            0x32	
+#define EDCF_AC_VO_TXOP_AP           0x002f	
+
+
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN            18          
+
+
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+	uint8 id; 			
+	uint8 length;
+	uint16 station_count; 		
+	uint8 channel_utilization;	
+	uint16 aac; 			
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+
+
+#define FIXED_MSDU_SIZE 0x8000		
+#define MSDU_SIZE_MASK	0x7fff		
+
+
+
+#define	INTEGER_SHIFT	13	
+#define FRACTION_MASK	0x1FFF	
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+	uint8 category;			
+	uint8 action;
+	uint8 token;
+	uint8 status;
+	uint8 data[1];			
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4	
+
+
+#define WME_ADDTS_REQUEST	0	
+#define WME_ADDTS_RESPONSE	1	
+#define WME_DELTS_REQUEST	2	
+
+
+#define WME_ADMISSION_ACCEPTED		0	
+#define WME_INVALID_PARAMETERS		1	
+#define WME_ADMISSION_REFUSED		3	
+
+
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+
+#define DOT11_OPEN_SYSTEM	0	
+#define DOT11_SHARED_KEY	1	
+
+#define DOT11_OPEN_SHARED	2	
+#define DOT11_CHALLENGE_LEN	128	
+
+
+#define FC_PVER_MASK		0x3	
+#define FC_PVER_SHIFT		0	
+#define FC_TYPE_MASK		0xC	
+#define FC_TYPE_SHIFT		2	
+#define FC_SUBTYPE_MASK		0xF0	
+#define FC_SUBTYPE_SHIFT	4	
+#define FC_TODS			0x100	
+#define FC_TODS_SHIFT		8	
+#define FC_FROMDS		0x200	
+#define FC_FROMDS_SHIFT		9	
+#define FC_MOREFRAG		0x400	
+#define FC_MOREFRAG_SHIFT	10	
+#define FC_RETRY		0x800	
+#define FC_RETRY_SHIFT		11	
+#define FC_PM			0x1000	
+#define FC_PM_SHIFT		12	
+#define FC_MOREDATA		0x2000	
+#define FC_MOREDATA_SHIFT	13	
+#define FC_WEP			0x4000	
+#define FC_WEP_SHIFT		14	
+#define FC_ORDER		0x8000	
+#define FC_ORDER_SHIFT		15	
+
+
+#define SEQNUM_SHIFT		4	
+#define SEQNUM_MAX		0x1000	
+#define FRAGNUM_MASK		0xF	
+
+
+
+
+#define FC_TYPE_MNG		0	
+#define FC_TYPE_CTL		1	
+#define FC_TYPE_DATA		2	
+
+
+#define FC_SUBTYPE_ASSOC_REQ		0	
+#define FC_SUBTYPE_ASSOC_RESP		1	
+#define FC_SUBTYPE_REASSOC_REQ		2	
+#define FC_SUBTYPE_REASSOC_RESP		3	
+#define FC_SUBTYPE_PROBE_REQ		4	
+#define FC_SUBTYPE_PROBE_RESP		5	
+#define FC_SUBTYPE_BEACON		8	
+#define FC_SUBTYPE_ATIM			9	
+#define FC_SUBTYPE_DISASSOC		10	
+#define FC_SUBTYPE_AUTH			11	
+#define FC_SUBTYPE_DEAUTH		12	
+#define FC_SUBTYPE_ACTION		13	
+#define FC_SUBTYPE_ACTION_NOACK		14	
+
+
+#define FC_SUBTYPE_CTL_WRAPPER		7	
+#define FC_SUBTYPE_BLOCKACK_REQ		8	
+#define FC_SUBTYPE_BLOCKACK		9	
+#define FC_SUBTYPE_PS_POLL		10	
+#define FC_SUBTYPE_RTS			11	
+#define FC_SUBTYPE_CTS			12	
+#define FC_SUBTYPE_ACK			13	
+#define FC_SUBTYPE_CF_END		14	
+#define FC_SUBTYPE_CF_END_ACK		15	
+
+
+#define FC_SUBTYPE_DATA			0	
+#define FC_SUBTYPE_DATA_CF_ACK		1	
+#define FC_SUBTYPE_DATA_CF_POLL		2	
+#define FC_SUBTYPE_DATA_CF_ACK_POLL	3	
+#define FC_SUBTYPE_NULL			4	
+#define FC_SUBTYPE_CF_ACK		5	
+#define FC_SUBTYPE_CF_POLL		6	
+#define FC_SUBTYPE_CF_ACK_POLL		7	
+#define FC_SUBTYPE_QOS_DATA		8	
+#define FC_SUBTYPE_QOS_DATA_CF_ACK	9	
+#define FC_SUBTYPE_QOS_DATA_CF_POLL	10	
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL	11	
+#define FC_SUBTYPE_QOS_NULL		12	
+#define FC_SUBTYPE_QOS_CF_POLL		14	
+#define FC_SUBTYPE_QOS_CF_ACK_POLL	15	
+
+
+#define FC_SUBTYPE_ANY_QOS(s)		(((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s)		(((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s)	(((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s)	(((s) & 1) != 0)
+
+
+#define FC_KIND_MASK		(FC_TYPE_MASK | FC_SUBTYPE_MASK)	
+
+#define FC_KIND(t, s)	(((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))	
+
+#define FC_SUBTYPE(fc)	(((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)	
+#define FC_TYPE(fc)	(((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)	
+
+#define FC_ASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)	
+#define FC_ASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)	
+#define FC_REASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)	
+#define FC_REASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)	
+#define FC_PROBE_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)	
+#define FC_PROBE_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)	
+#define FC_BEACON	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)		
+#define FC_DISASSOC	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)	
+#define FC_AUTH		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)		
+#define FC_DEAUTH	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)		
+#define FC_ACTION	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)		
+#define FC_ACTION_NOACK	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)	
+
+#define FC_CTL_WRAPPER	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)	
+#define FC_BLOCKACK_REQ	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)	
+#define FC_BLOCKACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)	
+#define FC_PS_POLL	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)	
+#define FC_RTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)		
+#define FC_CTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)		
+#define FC_ACK		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)		
+#define FC_CF_END	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)		
+#define FC_CF_END_ACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)	
+
+#define FC_DATA		FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)		
+#define FC_NULL_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)		
+#define FC_DATA_CF_ACK	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)	
+#define FC_QOS_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)	
+#define FC_QOS_NULL	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)	
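A small sketch of how the frame-control helpers above are used (illustrative only, not part of the patch): FC_TYPE()/FC_SUBTYPE() extract the two fields from a host-order fc value, and FC_KIND_MASK lets a frame be compared directly against the composite FC_* constants.

static int example_is_beacon(uint16 fc)
{
	/* mask off everything but type and subtype, then compare with the composite constant */
	return (fc & FC_KIND_MASK) == FC_BEACON;
}

static int example_is_probe_resp(uint16 fc)
{
	return FC_TYPE(fc) == FC_TYPE_MNG && FC_SUBTYPE(fc) == FC_SUBTYPE_PROBE_RESP;
}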
+
+
+
+
+#define QOS_PRIO_SHIFT		0	
+#define QOS_PRIO_MASK		0x0007	
+#define QOS_PRIO(qos)		(((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)	
+
+
+#define QOS_TID_SHIFT		0	
+#define QOS_TID_MASK		0x000f	
+#define QOS_TID(qos)		(((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)	
+
+
+#define QOS_EOSP_SHIFT		4	
+#define QOS_EOSP_MASK		0x0010	
+#define QOS_EOSP(qos)		(((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)	
+
+
+#define QOS_ACK_NORMAL_ACK	0	
+#define QOS_ACK_NO_ACK		1	
+#define QOS_ACK_NO_EXP_ACK	2	
+#define QOS_ACK_BLOCK_ACK	3	
+#define QOS_ACK_SHIFT		5	
+#define QOS_ACK_MASK		0x0060	
+#define QOS_ACK(qos)		(((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)	
+
+
+#define QOS_AMSDU_SHIFT		7	
+#define QOS_AMSDU_MASK		0x0080	
+
+
+
+
+
+
+#define DOT11_MNG_AUTH_ALGO_LEN		2	
+#define DOT11_MNG_AUTH_SEQ_LEN		2	
+#define DOT11_MNG_BEACON_INT_LEN	2	
+#define DOT11_MNG_CAP_LEN		2	
+#define DOT11_MNG_AP_ADDR_LEN		6	
+#define DOT11_MNG_LISTEN_INT_LEN	2	
+#define DOT11_MNG_REASON_LEN		2	
+#define DOT11_MNG_AID_LEN		2	
+#define DOT11_MNG_STATUS_LEN		2	
+#define DOT11_MNG_TIMESTAMP_LEN		8	
+
+
+#define DOT11_AID_MASK			0x3fff	
+
+
+#define DOT11_RC_RESERVED		0	
+#define DOT11_RC_UNSPECIFIED		1	
+#define DOT11_RC_AUTH_INVAL		2	
+#define DOT11_RC_DEAUTH_LEAVING		3	
+#define DOT11_RC_INACTIVITY		4	
+#define DOT11_RC_BUSY			5	
+#define DOT11_RC_INVAL_CLASS_2		6	
+#define DOT11_RC_INVAL_CLASS_3		7	
+#define DOT11_RC_DISASSOC_LEAVING	8	
+#define DOT11_RC_NOT_AUTH		9	
+#define DOT11_RC_BAD_PC			10	
+#define DOT11_RC_BAD_CHANNELS		11	
+
+
+
+#define DOT11_RC_UNSPECIFIED_QOS	32	
+#define DOT11_RC_INSUFFCIENT_BW		33	
+#define DOT11_RC_EXCESSIVE_FRAMES	34	
+#define DOT11_RC_TX_OUTSIDE_TXOP	35	
+#define DOT11_RC_LEAVING_QBSS		36	
+#define DOT11_RC_BAD_MECHANISM		37	
+#define DOT11_RC_SETUP_NEEDED		38	
+#define DOT11_RC_TIMEOUT		39	
+
+#define DOT11_RC_MAX			23	
+
+
+#define DOT11_SC_SUCCESS		0	
+#define DOT11_SC_FAILURE		1	
+#define DOT11_SC_CAP_MISMATCH		10	
+#define DOT11_SC_REASSOC_FAIL		11	
+#define DOT11_SC_ASSOC_FAIL		12	
+#define DOT11_SC_AUTH_MISMATCH		13	
+#define DOT11_SC_AUTH_SEQ		14	
+#define DOT11_SC_AUTH_CHALLENGE_FAIL	15	
+#define DOT11_SC_AUTH_TIMEOUT		16	
+#define DOT11_SC_ASSOC_BUSY_FAIL	17	
+#define DOT11_SC_ASSOC_RATE_MISMATCH	18	
+#define DOT11_SC_ASSOC_SHORT_REQUIRED	19	
+#define DOT11_SC_ASSOC_PBCC_REQUIRED	20	
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED	21	
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED	22	
+#define DOT11_SC_ASSOC_BAD_POWER_CAP	23	
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS	24	
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED	25	
+#define DOT11_SC_ASSOC_ERPBCC_REQUIRED	26	
+#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED	27	
+
+#define	DOT11_SC_DECLINED		37	
+#define	DOT11_SC_INVALID_PARAMS		38	
+
+
+#define DOT11_MNG_DS_PARAM_LEN			1	
+#define DOT11_MNG_IBSS_PARAM_LEN		2	
+
+
+#define DOT11_MNG_TIM_FIXED_LEN			3	
+#define DOT11_MNG_TIM_DTIM_COUNT		0	
+#define DOT11_MNG_TIM_DTIM_PERIOD		1	
+#define DOT11_MNG_TIM_BITMAP_CTL		2	
+#define DOT11_MNG_TIM_PVB			3	
+
+
+#define TLV_TAG_OFF		0	
+#define TLV_LEN_OFF		1	
+#define TLV_HDR_LEN		2	
+#define TLV_BODY_OFF		2	
+
+
+#define DOT11_MNG_SSID_ID			0	
+#define DOT11_MNG_RATES_ID			1	
+#define DOT11_MNG_FH_PARMS_ID			2	
+#define DOT11_MNG_DS_PARMS_ID			3	
+#define DOT11_MNG_CF_PARMS_ID			4	
+#define DOT11_MNG_TIM_ID			5	
+#define DOT11_MNG_IBSS_PARMS_ID			6	
+#define DOT11_MNG_COUNTRY_ID			7	
+#define DOT11_MNG_HOPPING_PARMS_ID		8	
+#define DOT11_MNG_HOPPING_TABLE_ID		9	
+#define DOT11_MNG_REQUEST_ID			10	
+#define DOT11_MNG_QBSS_LOAD_ID 			11	
+#define DOT11_MNG_EDCA_PARAM_ID			12	
+#define DOT11_MNG_CHALLENGE_ID			16	
+#define DOT11_MNG_PWR_CONSTRAINT_ID		32	
+#define DOT11_MNG_PWR_CAP_ID			33	
+#define DOT11_MNG_TPC_REQUEST_ID 		34	
+#define DOT11_MNG_TPC_REPORT_ID			35	
+#define DOT11_MNG_SUPP_CHANNELS_ID		36	
+#define DOT11_MNG_CHANNEL_SWITCH_ID		37	
+#define DOT11_MNG_MEASURE_REQUEST_ID		38	
+#define DOT11_MNG_MEASURE_REPORT_ID		39	
+#define DOT11_MNG_QUIET_ID			40	
+#define DOT11_MNG_IBSS_DFS_ID			41	
+#define DOT11_MNG_ERP_ID			42	
+#define DOT11_MNG_TS_DELAY_ID			43	
+#define	DOT11_MNG_HT_CAP			45	
+#define DOT11_MNG_QOS_CAP_ID			46	
+#define DOT11_MNG_NONERP_ID			47	
+#define DOT11_MNG_RSN_ID			48	
+#define DOT11_MNG_EXT_RATES_ID			50	
+#define	DOT11_MNG_REGCLASS_ID			59	
+#define DOT11_MNG_EXT_CSA_ID			60	
+#define	DOT11_MNG_HT_ADD			61	
+#define	DOT11_MNG_EXT_CHANNEL_OFFSET		62	
+#define DOT11_MNG_WAPI_ID				68	
+#define	DOT11_MNG_HT_BSS_COEXINFO_ID		72	
+#define	DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID	73	
+#define	DOT11_MNG_HT_OBSS_ID			74	
+#define	DOT11_MNG_EXT_CAP			127	
+#define DOT11_MNG_WPA_ID			221	
+#define DOT11_MNG_PROPR_ID			221	
+
+
+#define DOT11_RATE_BASIC			0x80	
+#define DOT11_RATE_MASK				0x7F	
+
+
+#define DOT11_MNG_ERP_LEN			1	
+#define DOT11_MNG_NONERP_PRESENT		0x01	
+#define DOT11_MNG_USE_PROTECTION		0x02	
+#define DOT11_MNG_BARKER_PREAMBLE		0x04	
+
+#define DOT11_MGN_TS_DELAY_LEN		4	
+#define TS_DELAY_FIELD_SIZE			4	
+
+
+#define DOT11_CAP_ESS				0x0001	
+#define DOT11_CAP_IBSS				0x0002	
+#define DOT11_CAP_POLLABLE			0x0004	
+#define DOT11_CAP_POLL_RQ			0x0008	
+#define DOT11_CAP_PRIVACY			0x0010	
+#define DOT11_CAP_SHORT				0x0020	
+#define DOT11_CAP_PBCC				0x0040	
+#define DOT11_CAP_AGILITY			0x0080	
+#define DOT11_CAP_SPECTRUM			0x0100	
+#define DOT11_CAP_SHORTSLOT			0x0400	
+#define DOT11_CAP_CCK_OFDM			0x2000	
+
+
+#define DOT11_OBSS_COEX_MNG_SUPPORT	0x01	
+
+
+#define DOT11_ACTION_HDR_LEN		2	
+#define DOT11_ACTION_CAT_ERR_MASK	0x80	
+#define DOT11_ACTION_CAT_MASK		0x7F	
+#define DOT11_ACTION_CAT_SPECT_MNG	0	
+#define DOT11_ACTION_CAT_BLOCKACK	3	
+#define DOT11_ACTION_CAT_PUBLIC		4	
+#define DOT11_ACTION_CAT_HT		7	
+#define DOT11_ACTION_CAT_VS			127	
+#define DOT11_ACTION_NOTIFICATION	0x11	
+
+#define DOT11_ACTION_ID_M_REQ		0	
+#define DOT11_ACTION_ID_M_REP		1	
+#define DOT11_ACTION_ID_TPC_REQ		2	
+#define DOT11_ACTION_ID_TPC_REP		3	
+#define DOT11_ACTION_ID_CHANNEL_SWITCH	4	
+#define DOT11_ACTION_ID_EXT_CSA		5	
+
+
+#define DOT11_ACTION_ID_HT_CH_WIDTH	0	
+#define DOT11_ACTION_ID_HT_MIMO_PS	1	
+
+
+#define DOT11_PUB_ACTION_BSS_COEX_MNG	0	
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH	4	
+
+
+#define DOT11_BA_ACTION_ADDBA_REQ	0	
+#define DOT11_BA_ACTION_ADDBA_RESP	1	
+#define DOT11_BA_ACTION_DELBA		2	
+
+
+#define DOT11_ADDBA_PARAM_AMSDU_SUP	0x0001	
+#define DOT11_ADDBA_PARAM_POLICY_MASK	0x0002	
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT	1	
+#define DOT11_ADDBA_PARAM_TID_MASK	0x003c	
+#define DOT11_ADDBA_PARAM_TID_SHIFT	2	
+#define DOT11_ADDBA_PARAM_BSIZE_MASK	0xffc0	
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT	6	
+
+#define DOT11_ADDBA_POLICY_DELAYED	0	
+#define DOT11_ADDBA_POLICY_IMMEDIATE	1	
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category;				
+	uint8 action;				
+	uint8 token;				
+	uint16 addba_param_set;			
+	uint16 timeout;				
+	uint16 start_seqnum;			
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN		9	
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category;				
+	uint8 action;				
+	uint8 token;				
+	uint16 status;				
+	uint16 addba_param_set;			
+	uint16 timeout;				
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN		9	
+
+
+#define DOT11_DELBA_PARAM_INIT_MASK	0x0800	
+#define DOT11_DELBA_PARAM_INIT_SHIFT	11	
+#define DOT11_DELBA_PARAM_TID_MASK	0xf000	
+#define DOT11_DELBA_PARAM_TID_SHIFT	12	
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+	uint8 category;				
+	uint8 action;				
+	uint16 delba_param_set;			
+	uint16 reason;				
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN			6	
+
+
+#define DOT11_BSSTYPE_INFRASTRUCTURE		0	
+#define DOT11_BSSTYPE_INDEPENDENT		1	
+#define DOT11_BSSTYPE_ANY			2	
+#define DOT11_SCANTYPE_ACTIVE			0	
+#define DOT11_SCANTYPE_PASSIVE			1	
+
+
+#define PREN_PREAMBLE		24	
+#define PREN_MM_EXT		8	
+#define PREN_PREAMBLE_EXT	4	
+
+
+#define NPHY_RIFS_TIME		2	
+
+
+#define APHY_SLOT_TIME		9	
+#define APHY_SIFS_TIME		16	
+#define APHY_DIFS_TIME		(APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))	
+#define APHY_PREAMBLE_TIME	16	
+#define APHY_SIGNAL_TIME	4	
+#define APHY_SYMBOL_TIME	4	
+#define APHY_SERVICE_NBITS	16	
+#define APHY_TAIL_NBITS		6	
+#define	APHY_CWMIN		15	
+
+
+#define BPHY_SLOT_TIME		20	
+#define BPHY_SIFS_TIME		10	
+#define BPHY_DIFS_TIME		50	
+#define BPHY_PLCP_TIME		192	
+#define BPHY_PLCP_SHORT_TIME	96	
+#define	BPHY_CWMIN		31	
+
+
+#define DOT11_OFDM_SIGNAL_EXTENSION	6	
+
+#define PHY_CWMAX		1023	
+
+#define	DOT11_MAXNUMFRAGS	16	
+
+
+typedef struct d11cnt {
+	uint32		txfrag;		
+	uint32		txmulti;	
+	uint32		txfail;		
+	uint32		txretry;	
+	uint32		txretrie;	
+	uint32		rxdup;		
+	uint32		txrts;		
+	uint32		txnocts;	
+	uint32		txnoack;	
+	uint32		rxfrag;		
+	uint32		rxmulti;	
+	uint32		rxcrc;		
+	uint32		txfrmsnt;	
+	uint32		rxundec;	
+} d11cnt_t;
+
+
+#define BRCM_PROP_OUI		"\x00\x90\x4C"	
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
+	uint8 id;		
+	uint8 len;		
+	uint8 oui[3];		
+	uint8 type;		
+	uint16 cap;		
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_prop_ie_s brcm_prop_ie_t;
+
+#define BRCM_PROP_IE_LEN	6	
+
+#define DPT_IE_TYPE		2
+
+
+#define BRCM_OUI		"\x00\x10\x18"	
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+	uint8	id;		
+	uint8	len;		
+	uint8	oui[3];		
+	uint8	ver;		
+	uint8	assoc;		
+	uint8	flags;		
+	uint8	flags1;		
+	uint16	amsdu_mtu_pref;	
+} BWL_POST_PACKED_STRUCT;
+typedef	struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN		11	
+#define BRCM_IE_VER		2	
+#define BRCM_IE_LEGACY_AES_VER	1	
+
+
+#ifdef WLAFTERBURNER
+#define	BRF_ABCAP		0x1	
+#define	BRF_ABRQRD		0x2	
+#define BRF_ABCOUNTER_MASK	0xf0	
+#define BRF_ABCOUNTER_SHIFT	4	
+#endif 
+#define	BRF_LZWDS		0x4	
+#define	BRF_BLOCKACK		0x8	
+
+
+#define	BRF1_AMSDU		0x1	
+#define BRF1_WMEPS		0x4	
+#define BRF1_PSOFIX		0x8	
+
+#ifdef WLAFTERBURNER
+#define AB_WDS_TIMEOUT_MAX	15	
+#define AB_WDS_TIMEOUT_MIN	1	
+#endif 
+
+#define AB_GUARDCOUNT	10		
+
+#define MCSSET_LEN	16	
+#define MAX_MCS_NUM	(128)	
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+	uint16	cap;
+	uint8	params;
+	uint8	supp_mcs[MCSSET_LEN];
+	uint16	ext_htcap;
+	uint32	txbf_cap;
+	uint8	as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+	uint8	id;		
+	uint8	len;		
+	uint8	oui[3];		
+	uint8	type;           
+	ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+#define HT_PROP_IE_OVERHEAD	4	
+#define HT_CAP_IE_LEN	26
+#define HT_CAP_IE_TYPE	51
+
+#define HT_CAP_LDPC_CODING	0x0001	
+#define HT_CAP_40MHZ		0x0002  
+#define HT_CAP_MIMO_PS_MASK	0x000C  
+#define HT_CAP_MIMO_PS_SHIFT	0x0002	
+#define HT_CAP_MIMO_PS_OFF	0x0003	
+#define HT_CAP_MIMO_PS_RTS	0x0001	
+#define HT_CAP_MIMO_PS_ON	0x0000	
+#define HT_CAP_GF		0x0010	
+#define HT_CAP_SHORT_GI_20	0x0020	
+#define HT_CAP_SHORT_GI_40	0x0040	
+#define HT_CAP_TX_STBC		0x0080	
+#define HT_CAP_RX_STBC_MASK	0x0300	
+#define HT_CAP_RX_STBC_SHIFT	8	
+#define HT_CAP_DELAYED_BA	0x0400	
+#define HT_CAP_MAX_AMSDU	0x0800	
+#define HT_CAP_DSSS_CCK	0x1000	
+#define HT_CAP_PSMP		0x2000	
+#define HT_CAP_40MHZ_INTOLERANT 0x4000	
+#define HT_CAP_LSIG_TXOP	0x8000	
+
+#define HT_CAP_RX_STBC_NO		0x0	
+#define HT_CAP_RX_STBC_ONE_STREAM	0x1	
+#define HT_CAP_RX_STBC_TWO_STREAM	0x2	
+#define HT_CAP_RX_STBC_THREE_STREAM	0x3	
+
+#define HT_MAX_AMSDU		7935	
+#define HT_MIN_AMSDU		3835	
+
+#define HT_PARAMS_RX_FACTOR_MASK	0x03	
+#define HT_PARAMS_DENSITY_MASK		0x1C	
+#define HT_PARAMS_DENSITY_SHIFT	2	
+
+
+#define AMPDU_MAX_MPDU_DENSITY	7	
+#define AMPDU_RX_FACTOR_64K	3	
+#define AMPDU_RX_FACTOR_BASE	8*1024	
+#define AMPDU_DELIMITER_LEN	4	
+
+#define HT_CAP_EXT_PCO			0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK	0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT	1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK	0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT	8
+#define HT_CAP_EXT_HTC			0x0400
+#define HT_CAP_EXT_RD_RESP		0x0800
+
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+	uint8	ctl_ch;			
+	uint8	byte1;			
+	uint16	opmode;			
+	uint16	misc_bits;		
+	uint8	basic_mcs[MCSSET_LEN];  
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+	uint8	id;		
+	uint8	len;		
+	uint8	oui[3];		
+	uint8	type;		
+	ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN	22
+#define HT_ADD_IE_TYPE	52
+
+
+#define HT_BW_ANY		0x04	
+#define HT_RIFS_PERMITTED     	0x08	
+
+
+#define HT_OPMODE_MASK	        0x0003	
+#define HT_OPMODE_SHIFT	0	
+#define HT_OPMODE_PURE		0x0000	
+#define HT_OPMODE_OPTIONAL	0x0001	
+#define HT_OPMODE_HT20IN40	0x0002	
+#define HT_OPMODE_MIXED	0x0003	
+#define HT_OPMODE_NONGF	0x0004	
+#define DOT11N_TXBURST		0x0008	
+#define DOT11N_OBSS_NONHT	0x0010	
+
+
+#define HT_BASIC_STBC_MCS	0x007f	
+#define HT_DUAL_STBC_PROT	0x0080	
+#define HT_SECOND_BCN		0x0100	
+#define HT_LSIG_TXOP		0x0200	
+#define HT_PCO_ACTIVE		0x0400	
+#define HT_PCO_PHASE		0x0800	
+#define HT_DUALCTS_PROTECTION	0x0080	
+
+
+#define DOT11N_2G_TXBURST_LIMIT	6160	
+#define DOT11N_5G_TXBURST_LIMIT	3080	
+
+
+#define GET_HT_OPMODE(add_ie)		((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					>> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_MIXED)	
+#define HT_HT20_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_HT20IN40)	
+#define HT_OPTIONAL_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_OPTIONAL)	
+#define HT_USE_PROTECTION(add_ie)	(HT_HT20_PRESENT((add_ie)) || \
+					HT_MIXEDMODE_PRESENT((add_ie))) 
+#define HT_NONGF_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+					== HT_OPMODE_NONGF)	
+#define DOT11N_TXBURST_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+					== DOT11N_TXBURST)	
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+					== DOT11N_OBSS_NONHT)	
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+	uint16	passive_dwell;
+	uint16	active_dwell;
+	uint16	bss_widthscan_interval;
+	uint16	passive_total;
+	uint16	active_total;
+	uint16	chanwidth_transition_dly;
+	uint16	activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+	uint8	id;
+	uint8	len;
+	obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN	sizeof(obss_params_t)	
+
+
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+	uchar id;
+	uchar len;
+	uchar oui [3];
+	uchar data [1]; 	
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN		2	
+#define VNDR_IE_MIN_LEN		3	
+#define VNDR_IE_MAX_LEN		256	
+
+
+#define WPA_VERSION		1	
+#define WPA_OUI			"\x00\x50\xF2"	
+
+#define WPA2_VERSION		1	
+#define WPA2_VERSION_LEN	2	
+#define WPA2_OUI		"\x00\x0F\xAC"	
+
+#define WPA_OUI_LEN	3	
+
+
+#define RSN_AKM_NONE		0	
+#define RSN_AKM_UNSPECIFIED	1	
+#define RSN_AKM_PSK		2	
+
+
+#define DOT11_MAX_DEFAULT_KEYS	4	
+#define DOT11_MAX_KEY_SIZE	32	
+#define DOT11_MAX_IV_SIZE	16	
+#define DOT11_EXT_IV_FLAG	(1<<5)	
+#define DOT11_WPA_KEY_RSC_LEN   8       
+
+#define WEP1_KEY_SIZE		5	
+#define WEP1_KEY_HEX_SIZE	10	
+#define WEP128_KEY_SIZE		13	
+#define WEP128_KEY_HEX_SIZE	26	
+#define TKIP_MIC_SIZE		8	
+#define TKIP_EOM_SIZE		7	
+#define TKIP_EOM_FLAG		0x5a	
+#define TKIP_KEY_SIZE		32	
+#define TKIP_MIC_AUTH_TX	16	
+#define TKIP_MIC_AUTH_RX	24	
+#define TKIP_MIC_SUP_RX		TKIP_MIC_AUTH_TX	
+#define TKIP_MIC_SUP_TX		TKIP_MIC_AUTH_RX	
+#define AES_KEY_SIZE		16	
+#define AES_MIC_SIZE		8	
+
+#define SMS4_KEY_LEN		16
+#define SMS4_WPI_CBC_MAC_LEN	16
+
+
+#include <packed_section_end.h>
+
+
+#endif 
diff --git a/drivers/net/wireless/bcm4329/include/proto/802.11e.h b/drivers/net/wireless/bcm4329/include/proto/802.11e.h
new file mode 100644
index 0000000..1dd6f45
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/802.11e.h
@@ -0,0 +1,131 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h,v 1.5.56.1 2008/11/20 00:51:18 Exp $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN           2           /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF          2           /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET	0		/* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET		1		/* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET		2		/* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET		3		/* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN];	/* WME_OUI */
+	uint8 type;					/* WME_TYPE */
+	uint8 subtype;				/* WME_SUBTYPE_TSPEC */
+	uint8 version;				/* WME_VERSION */
+	tsinfo_t tsinfo;			/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;	/* Minimum Service Interval (us) */
+	uint32 max_srv_interval;	/* Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/* Inactivity Interval (us) */
+	uint32 suspension_interval; /* Suspension Interval (us) */
+	uint32 srv_start_time;		/* Service Start Time (us) */
+	uint32 min_data_rate;		/* Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/* Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/* Peak Data Rate (bps) */
+	uint32 max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint32 delay_bound;			/* Delay Bound (us) */
+	uint32 min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;			/* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time;			/* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))		/* not including the 2-byte header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT		1	/* TS info. TID shift */
+#define TS_INFO_TID_MASK		(0xf << TS_INFO_TID_SHIFT)	/* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT	7	/* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK	(0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT	5	/* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT		2		/* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK		(1 << TS_INFO_PSB_SHIFT)	/* TS info. PSB mask */
+#define TS_INFO_UPLINK			(0 << TS_INFO_DIRECTION_SHIFT)	/* TS info. uplink */
+#define TS_INFO_DOWNLINK		(1 << TS_INFO_DIRECTION_SHIFT)	/* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL	(3 << TS_INFO_DIRECTION_SHIFT)	/* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT	3	/* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK	(0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt)	((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt)	((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt)	((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt)	((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id)	((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio)	((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
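A brief sketch of the TS Info accessors above (illustrative only, not part of the patch): they operate on the raw three-octet tsinfo field of a TSPEC, so reading the TID and direction of a received element looks like this.

static void example_tspec_fields(const tspec_t *ts, uint8 *tid, uint8 *direction)
{
	*tid = WLC_CAC_GET_TID(ts->tsinfo);		/* bits 4:1 of octet 0 */
	*direction = WLC_CAC_GET_DIR(ts->tsinfo);	/* bits 6:5 of octet 0 */
}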
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN		5	/* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF		3	/* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT		300	/* default ADDTS response timeout in ms */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED	0	/* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM	1	/* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW	3	/* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE	47	/* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS		36	/* STA leave QBSS */
+#define DOT11E_STATUS_END_TS				37	/* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS			38	/* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT		39	/* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcm4329/include/proto/802.1d.h b/drivers/net/wireless/bcm4329/include/proto/802.1d.h
new file mode 100644
index 0000000..45c728b
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/802.1d.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h,v 9.3 2007/04/10 21:33:06 Exp $
+ */
+
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+
+#define	PRIO_8021D_NONE		2	/* None / unspecified */
+#define	PRIO_8021D_BK		1	/* Background */
+#define	PRIO_8021D_BE		0	/* Best Effort */
+#define	PRIO_8021D_EE		3	/* Excellent Effort */
+#define	PRIO_8021D_CL		4	/* Controlled Load */
+#define	PRIO_8021D_VI		5	/* Video */
+#define	PRIO_8021D_VO		6	/* Voice */
+#define	PRIO_8021D_NC		7	/* Network Control */
+#define	MAXPRIO			7	/* Highest 802.1D priority value */
+#define NUMPRIO			(MAXPRIO + 1)
+
+#define ALLPRIO		-1	/* All priorities */
+
+
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? (((prio) ^ 2)) : (prio))
+
+#endif 
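Editor's example: PRIO2PREC() only remaps BE (0) and NONE (2), XOR-ing them with 2 so the result can be used as an ascending precedence index; all other priorities pass through unchanged. A quick self-check sketch of the arithmetic (not driver code):

#include <assert.h>

static void prio2prec_selfcheck(void)
{
	assert(PRIO2PREC(PRIO_8021D_BE)   == 2);	/* 0 -> 2 */
	assert(PRIO2PREC(PRIO_8021D_NONE) == 0);	/* 2 -> 0 */
	assert(PRIO2PREC(PRIO_8021D_BK)   == 1);	/* unchanged */
	assert(PRIO2PREC(PRIO_8021D_VO)   == 6);	/* unchanged */
}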
diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmeth.h b/drivers/net/wireless/bcm4329/include/proto/bcmeth.h
new file mode 100644
index 0000000..fdb5a2a
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/bcmeth.h
@@ -0,0 +1,83 @@
+/*
+ * Broadcom Ethernettype  protocol definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h,v 9.9.46.1 2008/11/20 00:51:20 Exp $
+ */
+
+
+
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+
+
+
+
+#define	BCMILCP_SUBTYPE_RATE		1
+#define	BCMILCP_SUBTYPE_LINK		2
+#define	BCMILCP_SUBTYPE_CSA		3
+#define	BCMILCP_SUBTYPE_LARQ		4
+#define BCMILCP_SUBTYPE_VENDOR		5
+#define	BCMILCP_SUBTYPE_FLH		17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG	32769
+#define BCMILCP_SUBTYPE_CERT		32770
+#define BCMILCP_SUBTYPE_SES		32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED		0
+#define BCMILCP_BCM_SUBTYPE_EVENT		1
+#define BCMILCP_BCM_SUBTYPE_SES			2
+
+
+#define BCMILCP_BCM_SUBTYPE_DPT			4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH	8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION		0
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+	uint16	subtype;	
+	uint16	length;
+	uint8	version;	
+	uint8	oui[3];		
+	
+	uint16	usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif	
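Editor's example: as far as can be read from the companion dhd host code, event frames carry this header with the "vendor long" subtype and BCMILCP_BCM_SUBTYPE_EVENT as the user subtype, with both 16-bit fields in network byte order; the host side typically also verifies the Broadcom OUI bytes, which are omitted here. Treat the sketch below as an assumption about usage, not the driver's actual parser; ntohs() stands in for the driver's own byte-order helpers.

#include <stdbool.h>
#include <arpa/inet.h>	/* ntohs(); host-side stand-in for illustration */

static bool bcmeth_hdr_is_event(const bcmeth_hdr_t *h)
{
	return ntohs(h->subtype) == BCMILCP_SUBTYPE_VENDOR_LONG &&
	       h->version == BCMILCP_BCM_SUBTYPEHDR_VERSION &&
	       ntohs(h->usr_subtype) == BCMILCP_BCM_SUBTYPE_EVENT;
}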
diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmevent.h b/drivers/net/wireless/bcm4329/include/proto/bcmevent.h
new file mode 100644
index 0000000..1f8ecb1
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/bcmevent.h
@@ -0,0 +1,212 @@
+/*
+ * Broadcom Event  protocol definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h,v 9.34.4.1.20.16.64.1 2010/11/08 21:57:03 Exp $
+ *
+ */
+
+
+
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION		1	
+#define BCM_MSG_IFNAME_MAX		16	
+
+
+#define WLC_EVENT_MSG_LINK		0x01	
+#define WLC_EVENT_MSG_FLUSHTXQ		0x02	
+#define WLC_EVENT_MSG_GROUP		0x04	
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			
+	uint32	event_type;		
+	uint32	status;			
+	uint32	reason;			
+	uint32	auth_type;		
+	uint32	datalen;		
+	struct ether_addr	addr;	
+	char	ifname[BCM_MSG_IFNAME_MAX]; 
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+	struct ether_header eth;
+	bcmeth_hdr_t		bcm_hdr;
+	wl_event_msg_t		event;
+	
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN	(sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
+
+
+#define WLC_E_SET_SSID		0	
+#define WLC_E_JOIN		1	
+#define WLC_E_START		2	
+#define WLC_E_AUTH		3	
+#define WLC_E_AUTH_IND		4	
+#define WLC_E_DEAUTH		5	
+#define WLC_E_DEAUTH_IND	6	
+#define WLC_E_ASSOC		7	
+#define WLC_E_ASSOC_IND		8	
+#define WLC_E_REASSOC		9	
+#define WLC_E_REASSOC_IND	10	
+#define WLC_E_DISASSOC		11	
+#define WLC_E_DISASSOC_IND	12	
+#define WLC_E_QUIET_START	13	
+#define WLC_E_QUIET_END		14	
+#define WLC_E_BEACON_RX		15	
+#define WLC_E_LINK		16	
+#define WLC_E_MIC_ERROR		17	
+#define WLC_E_NDIS_LINK		18	
+#define WLC_E_ROAM		19	
+#define WLC_E_TXFAIL		20	
+#define WLC_E_PMKID_CACHE	21	
+#define WLC_E_RETROGRADE_TSF	22	
+#define WLC_E_PRUNE		23	
+#define WLC_E_AUTOAUTH		24	
+#define WLC_E_EAPOL_MSG		25	
+#define WLC_E_SCAN_COMPLETE	26	
+#define WLC_E_ADDTS_IND		27	
+#define WLC_E_DELTS_IND		28	
+#define WLC_E_BCNSENT_IND	29	
+#define WLC_E_BCNRX_MSG		30	
+#define WLC_E_BCNLOST_MSG	31	
+#define WLC_E_ROAM_PREP		32	
+#define WLC_E_PFN_NET_FOUND	33	
+#define WLC_E_PFN_NET_LOST	34	
+#define WLC_E_RESET_COMPLETE	35
+#define WLC_E_JOIN_START	36
+#define WLC_E_ROAM_START	37
+#define WLC_E_ASSOC_START	38
+#define WLC_E_IBSS_ASSOC	39
+#define WLC_E_RADIO		40
+#define WLC_E_PSM_WATCHDOG	41	
+#define WLC_E_PROBREQ_MSG       44      
+#define WLC_E_SCAN_CONFIRM_IND  45
+#define WLC_E_PSK_SUP	46	
+#define WLC_E_COUNTRY_CODE_CHANGED 47
+#define	WLC_E_EXCEEDED_MEDIUM_TIME 48	
+#define WLC_E_ICV_ERROR		49	
+#define WLC_E_UNICAST_DECODE_ERROR 50 
+#define WLC_E_MULTICAST_DECODE_ERROR 51 
+#define WLC_E_TRACE 52
+#define WLC_E_IF		54	
+#define WLC_E_RSSI		56	
+#define WLC_E_PFN_SCAN_COMPLETE	57	
+#define WLC_E_ACTION_FRAME      58      
+#define WLC_E_ACTION_FRAME_COMPLETE 59  
+
+#define WLC_E_ESCAN_RESULT	69
+#define WLC_E_WAKE_EVENT	70
+#define WLC_E_RELOAD		71
+#define WLC_E_LAST		72
+
+
+
+#define WLC_E_STATUS_SUCCESS		0	
+#define WLC_E_STATUS_FAIL		1	
+#define WLC_E_STATUS_TIMEOUT		2	
+#define WLC_E_STATUS_NO_NETWORKS	3	
+#define WLC_E_STATUS_ABORT		4	
+#define WLC_E_STATUS_NO_ACK		5	
+#define WLC_E_STATUS_UNSOLICITED	6	
+#define WLC_E_STATUS_ATTEMPT		7	
+#define WLC_E_STATUS_PARTIAL		8	
+#define WLC_E_STATUS_NEWSCAN	9	
+#define WLC_E_STATUS_NEWASSOC	10	
+#define WLC_E_STATUS_11HQUIET	11	
+#define WLC_E_STATUS_SUPPRESS	12	
+#define WLC_E_STATUS_NOCHANS	13	
+#define WLC_E_STATUS_CCXFASTRM	14	
+#define WLC_E_STATUS_CS_ABORT	15	
+
+
+#define WLC_E_REASON_INITIAL_ASSOC	0	
+#define WLC_E_REASON_LOW_RSSI		1	
+#define WLC_E_REASON_DEAUTH		2	
+#define WLC_E_REASON_DISASSOC		3	
+#define WLC_E_REASON_BCNS_LOST		4	
+#define WLC_E_REASON_FAST_ROAM_FAILED	5	
+#define WLC_E_REASON_DIRECTED_ROAM	6	
+#define WLC_E_REASON_TSPEC_REJECTED	7	
+#define WLC_E_REASON_BETTER_AP		8	
+
+
+#define WLC_E_PRUNE_ENCR_MISMATCH	1	
+#define WLC_E_PRUNE_BCAST_BSSID		2	
+#define WLC_E_PRUNE_MAC_DENY		3	
+#define WLC_E_PRUNE_MAC_NA		4	
+#define WLC_E_PRUNE_REG_PASSV		5	
+#define WLC_E_PRUNE_SPCT_MGMT		6	
+#define WLC_E_PRUNE_RADAR		7	
+#define WLC_E_RSN_MISMATCH		8	
+#define WLC_E_PRUNE_NO_COMMON_RATES	9	
+#define WLC_E_PRUNE_BASIC_RATES		10	
+#define WLC_E_PRUNE_CIPHER_NA		12	
+#define WLC_E_PRUNE_KNOWN_STA		13	
+#define WLC_E_PRUNE_WDS_PEER		15	
+#define WLC_E_PRUNE_QBSS_LOAD		16	
+#define WLC_E_PRUNE_HOME_AP		17	
+
+
+#define WLC_E_SUP_OTHER				0	
+#define WLC_E_SUP_DECRYPT_KEY_DATA	1	
+#define WLC_E_SUP_BAD_UCAST_WEP128	2	
+#define WLC_E_SUP_BAD_UCAST_WEP40	3	
+#define WLC_E_SUP_UNSUP_KEY_LEN		4	
+#define WLC_E_SUP_PW_KEY_CIPHER		5	
+#define WLC_E_SUP_MSG3_TOO_MANY_IE	6	
+#define WLC_E_SUP_MSG3_IE_MISMATCH	7	
+#define WLC_E_SUP_NO_INSTALL_FLAG	8	
+#define WLC_E_SUP_MSG3_NO_GTK		9	
+#define WLC_E_SUP_GRP_KEY_CIPHER	10	
+#define WLC_E_SUP_GRP_MSG1_NO_GTK	11	
+#define WLC_E_SUP_GTK_DECRYPT_FAIL	12	
+#define WLC_E_SUP_SEND_FAIL			13	
+#define WLC_E_SUP_DEAUTH			14	
+#define WLC_E_SUP_WPA_PSK_TMO       15  
+
+
+#define WLC_E_IF_ADD		1	
+#define WLC_E_IF_DEL		2	
+
+#define WLC_E_RELOAD_STATUS1	1
+
+#include <packed_section_end.h>
+
+#endif 
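Editor's example: putting the structures above together, a received event frame is an Ethernet header, the bcmeth header, the wl_event_msg_t, then `datalen` bytes of event payload; BCM_MSG_LEN reduces to sizeof(wl_event_msg_t) for the packed layout. A hedged sketch of pulling the interesting fields out, with ntohl()/ntohs() standing in for the driver's own byte-order helpers and the payload assumed to follow the packed bcm_event_t directly:

#include <stdint.h>
#include <arpa/inet.h>

static void handle_bcm_event(const bcm_event_t *pkt)
{
	const wl_event_msg_t *e = &pkt->event;
	uint32_t type   = ntohl(e->event_type);
	uint32_t status = ntohl(e->status);
	uint32_t dlen   = ntohl(e->datalen);
	uint16_t flags  = ntohs(e->flags);
	const uint8_t *data = (const uint8_t *)(pkt + 1);	/* event payload */

	switch (type) {
	case WLC_E_LINK:
		/* WLC_EVENT_MSG_LINK in flags distinguishes link up vs. down */
		(void)(flags & WLC_EVENT_MSG_LINK);
		break;
	case WLC_E_ESCAN_RESULT:
		if (status == WLC_E_STATUS_PARTIAL) {
			/* 'data' holds dlen bytes of partial scan results */
		}
		break;
	default:
		break;
	}
	(void)data; (void)dlen;
}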
diff --git a/drivers/net/wireless/bcm4329/include/proto/bcmip.h b/drivers/net/wireless/bcm4329/include/proto/bcmip.h
new file mode 100644
index 0000000..9d2fd6f
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/bcmip.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h,v 9.16.186.4 2009/01/27 04:25:25 Exp $
+ */
+
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define IP_VER_OFFSET		0x0	
+#define IP_VER_MASK		0xf0	
+#define IP_VER_SHIFT		4	
+#define IP_VER_4		4	
+#define IP_VER_6		6	
+
+#define IP_VER(ip_body) \
+	((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP		0x1	
+#define IP_PROT_TCP		0x6	
+#define IP_PROT_UDP		0x11	
+
+
+#define IPV4_VER_HL_OFFSET	0	
+#define IPV4_TOS_OFFSET		1	
+#define IPV4_PKTLEN_OFFSET	2	
+#define IPV4_PKTFLAG_OFFSET	6	
+#define IPV4_PROT_OFFSET	9	
+#define IPV4_CHKSUM_OFFSET	10	
+#define IPV4_SRC_IP_OFFSET	12	
+#define IPV4_DEST_IP_OFFSET	16	
+#define IPV4_OPTIONS_OFFSET	20	
+
+
+#define IPV4_VER_MASK		0xf0	
+#define IPV4_VER_SHIFT		4	
+
+#define IPV4_HLEN_MASK		0x0f	
+#define IPV4_HLEN(ipv4_body)	(4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN		4	
+
+#define IPV4_ADDR_NULL(a)	((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+				  ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a)	((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+				  ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define	IPV4_TOS_DSCP_MASK	0xfc	
+#define	IPV4_TOS_DSCP_SHIFT	2	
+
+#define	IPV4_TOS(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define	IPV4_TOS_PREC_MASK	0xe0	
+#define	IPV4_TOS_PREC_SHIFT	5	
+
+#define IPV4_TOS_LOWDELAY	0x10	
+#define IPV4_TOS_THROUGHPUT	0x8	
+#define IPV4_TOS_RELIABILITY	0x4	
+
+#define IPV4_PROT(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV		0x8000	
+#define IPV4_FRAG_DONT		0x4000	
+#define IPV4_FRAG_MORE		0x2000	
+#define IPV4_FRAG_OFFSET_MASK	0x1fff	
+
+#define IPV4_ADDR_STR_LEN	16	
+
+
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+	uint8	addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+	uint8	version_ihl;		
+	uint8	tos;			
+	uint16	tot_len;		
+	uint16	id;
+	uint16	frag;			
+	uint8	ttl;			
+	uint8	prot;			
+	uint16	hdr_chksum;		
+	uint8	src_ip[IPV4_ADDR_LEN];	
+	uint8	dst_ip[IPV4_ADDR_LEN];	
+} BWL_POST_PACKED_STRUCT;
+
+
+#define IPV6_PAYLOAD_LEN_OFFSET	4	
+#define IPV6_NEXT_HDR_OFFSET	6	
+#define IPV6_HOP_LIMIT_OFFSET	7	
+#define IPV6_SRC_IP_OFFSET	8	
+#define IPV6_DEST_IP_OFFSET	24	
+
+
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+	 ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+	 (((uint8 *)(ipv6_body))[2] << 8) | \
+	 (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+	((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+	 ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+	(((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body)	IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN		16	
+
+
+#ifndef IP_TOS
+#define IP_TOS(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+#endif
+
+
+
+#include <packed_section_end.h>
+
+#endif	
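Editor's example: IP_TOS() hides the v4/v6 difference, so a classifier can pull the precedence bits out without branching on the IP version. The "use the precedence bits as an 802.1D priority" mapping below is a common convention, not something this header mandates.

static unsigned ip_body_to_8021d_prio(const void *ip_body)
{
	/* The IPv6 traffic class has the same DSCP/precedence layout as the
	 * IPv4 TOS byte, so one mask/shift covers both. */
	return (IP_TOS(ip_body) & IPV4_TOS_PREC_MASK) >> IPV4_TOS_PREC_SHIFT;
}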
diff --git a/drivers/net/wireless/bcm4329/include/proto/eapol.h b/drivers/net/wireless/bcm4329/include/proto/eapol.h
new file mode 100644
index 0000000..95e76ff
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/eapol.h
@@ -0,0 +1,172 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * $Id: eapol.h,v 9.18.260.1.2.1.6.6 2009/04/08 05:00:08 Exp $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define AKW_BLOCK_LEN	8	/* The only def we need here */
+
+/* EAPOL for 802.3/Ethernet */
+typedef struct {
+	struct ether_header eth;	/* 802.3/Ethernet header */
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+	unsigned char body[1];		/* Body (optional) */
+} eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION	2
+#define WPA_EAPOL_VERSION	1
+#define LEAP_EAPOL_VERSION	1
+#define SES_EAPOL_VERSION	1
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN	8
+#define EAPOL_KEY_IV_LEN	16
+#define EAPOL_KEY_SIG_LEN	16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;			/* Key Descriptor Type */
+	unsigned short length;			/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char iv[EAPOL_KEY_IV_LEN];		/* Key IV */
+	unsigned char index;				/* Key Flags & Index */
+	unsigned char signature[EAPOL_KEY_SIG_LEN];	/* Key Signature */
+	unsigned char key[1];				/* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 	44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK	0x80
+#define EAPOL_KEY_BROADCAST	0
+#define EAPOL_KEY_UNICAST	0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK	0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN	8
+#define EAPOL_WPA_KEY_NONCE_LEN		32
+#define EAPOL_WPA_KEY_IV_LEN		16
+#define EAPOL_WPA_KEY_ID_LEN		8
+#define EAPOL_WPA_KEY_RSC_LEN		8
+#define EAPOL_WPA_KEY_MIC_LEN		16
+#define EAPOL_WPA_KEY_DATA_LEN		(EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE		32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;		/* Key Descriptor Type */
+	unsigned short key_info;	/* Key Information (unaligned) */
+	unsigned short key_len;		/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN];	/* Nonce */
+	unsigned char iv[EAPOL_WPA_KEY_IV_LEN];		/* Key IV */
+	unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN];	/* Key RSC */
+	unsigned char id[EAPOL_WPA_KEY_ID_LEN];		/* WPA:Key ID, 802.11i/WPA2: Reserved */
+	unsigned char mic[EAPOL_WPA_KEY_MIC_LEN];	/* Key MIC */
+	unsigned short data_len;			/* Key Data Length */
+	unsigned char data[EAPOL_WPA_KEY_DATA_LEN];	/* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 		95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_V1		0x01
+#define WPA_KEY_DESC_V2		0x02
+#define WPA_KEY_PAIRWISE	0x08
+#define WPA_KEY_INSTALL		0x40
+#define WPA_KEY_ACK		0x80
+#define WPA_KEY_MIC		0x100
+#define WPA_KEY_SECURE		0x200
+#define WPA_KEY_ERROR		0x400
+#define WPA_KEY_REQ		0x800
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0		0x00
+#define WPA_KEY_INDEX_1		0x10
+#define WPA_KEY_INDEX_2		0x20
+#define WPA_KEY_INDEX_3		0x30
+#define WPA_KEY_INDEX_MASK	0x30
+#define WPA_KEY_INDEX_SHIFT	0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA	0x1000
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 type;
+	uint8 length;
+	uint8 oui[3];
+	uint8 subtype;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 	6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK	1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY	2
+#define WPA2_KEY_DATA_SUBTYPE_MAC	3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID	4
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	flags;
+	uint8	reserved;
+	uint8	gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 	2
+
+#define WPA2_GTK_INDEX_MASK	0x03
+#define WPA2_GTK_INDEX_SHIFT	0x00
+
+#define WPA2_GTK_TRANSMIT	0x04
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	reserved[2];
+	uint8	mac[ETHER_ADDR_LEN];
+	uint8	stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD	0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
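Editor's example: key_info is a big-endian, unaligned 16-bit field, so it is safest to copy it out before testing the WPA_KEY_* bits. A sketch, with ntohs() standing in for whatever byte-order helper the driver uses:

#include <stdbool.h>
#include <string.h>
#include <arpa/inet.h>

static bool eapol_key_is_pairwise(const eapol_wpa_key_header_t *k)
{
	unsigned short info;

	memcpy(&info, &k->key_info, sizeof(info));	/* unaligned field */
	info = ntohs(info);

	/* Pairwise (4-way handshake) message vs. group key message; a real
	 * receiver would also check WPA_KEY_MIC before trusting the frame. */
	return (info & WPA_KEY_PAIRWISE) != 0;
}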
diff --git a/drivers/net/wireless/bcm4329/include/proto/ethernet.h b/drivers/net/wireless/bcm4329/include/proto/ethernet.h
new file mode 100644
index 0000000..9ad2ea0
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/ethernet.h
@@ -0,0 +1,148 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h,v 9.45.56.5 2010/02/22 22:04:36 Exp $
+ */
+
+
+#ifndef _NET_ETHERNET_H_	      
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define	ETHER_ADDR_LEN		6
+
+
+#define	ETHER_TYPE_LEN		2
+
+
+#define	ETHER_CRC_LEN		4
+
+
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+
+#define	ETHER_MIN_LEN		64
+
+
+#define	ETHER_MIN_DATA		46
+
+
+#define	ETHER_MAX_LEN		1518
+
+
+#define	ETHER_MAX_DATA		1500
+
+
+#define ETHER_TYPE_MIN		0x0600		
+#define	ETHER_TYPE_IP		0x0800		
+#define ETHER_TYPE_ARP		0x0806		
+#define ETHER_TYPE_8021Q	0x8100		
+#define	ETHER_TYPE_BRCM		0x886c		
+#define	ETHER_TYPE_802_1X	0x888e		
+#define ETHER_TYPE_WAI		0x88b4		
+#ifdef BCMWPA2
+#define	ETHER_TYPE_802_1X_PREAUTH 0x88c7	
+#endif
+
+
+#define	ETHER_BRCM_SUBTYPE_LEN	4	
+#define	ETHER_BRCM_CRAM		1	
+
+
+#define ETHER_DEST_OFFSET	(0 * ETHER_ADDR_LEN)	
+#define ETHER_SRC_OFFSET	(1 * ETHER_ADDR_LEN)	
+#define ETHER_TYPE_OFFSET	(2 * ETHER_ADDR_LEN)	
+
+
+#define	ETHER_IS_VALID_LEN(foo)	\
+	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+
+#ifndef __INCif_etherh       
+
+BWL_PRE_PACKED_STRUCT struct ether_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct	ether_addr {
+	uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif	
+
+
+#define ETHER_SET_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) 	(((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+
+#define ETHER_SET_UNICAST(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+
+#define	ether_cmp(a, b)	(!(((short*)a)[0] == ((short*)b)[0]) | \
+			 !(((short*)a)[1] == ((short*)b)[1]) | \
+			 !(((short*)a)[2] == ((short*)b)[2]))
+
+
+#define	ether_copy(s, d) { \
+		((short*)d)[0] = ((short*)s)[0]; \
+		((short*)d)[1] = ((short*)s)[1]; \
+		((short*)d)[2] = ((short*)s)[2]; }
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+
+#define ETHER_ISBCAST(ea)	((((uint8 *)(ea))[0] &		\
+	                          ((uint8 *)(ea))[1] &		\
+				  ((uint8 *)(ea))[2] &		\
+				  ((uint8 *)(ea))[3] &		\
+				  ((uint8 *)(ea))[4] &		\
+				  ((uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea)	((((uint8 *)(ea))[0] |		\
+				  ((uint8 *)(ea))[1] |		\
+				  ((uint8 *)(ea))[2] |		\
+				  ((uint8 *)(ea))[3] |		\
+				  ((uint8 *)(ea))[4] |		\
+				  ((uint8 *)(ea))[5]) == 0)
+
+
+
+#include <packed_section_end.h>
+
+#endif 
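Editor's example: the address helpers above trade strict-aliasing purity for three 16-bit compares/copies, so they expect 2-byte-aligned ether_addr storage. Typical use, as a sketch:

#include <stdbool.h>

static bool ether_addr_for_us(struct ether_addr *ea, struct ether_addr *our)
{
	if (ETHER_ISNULLADDR(ea))
		return false;				/* 00:00:00:00:00:00 */
	if (ETHER_ISBCAST(ea) || ETHER_ISMULTI(ea))
		return true;				/* broadcast / multicast */
	return ether_cmp(ea, our) == 0;			/* our unicast address */
}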
diff --git a/drivers/net/wireless/bcm4329/include/proto/sdspi.h b/drivers/net/wireless/bcm4329/include/proto/sdspi.h
new file mode 100644
index 0000000..7739e68
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/sdspi.h
@@ -0,0 +1,71 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h,v 9.1.20.1 2008/05/06 22:59:19 Exp $
+ */
+
+#define SPI_START_M		BITFIELD_MASK(1)	/* Bit [31] 	- Start Bit */
+#define SPI_START_S		31
+#define SPI_DIR_M		BITFIELD_MASK(1)	/* Bit [30] 	- Direction */
+#define SPI_DIR_S		30
+#define SPI_CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S		24
+#define SPI_RW_M		BITFIELD_MASK(1)	/* Bit [23] 	- Read=0, Write=1 */
+#define SPI_RW_S		23
+#define SPI_FUNC_M		BITFIELD_MASK(3)	/* Bits [22:20]	- Function Number */
+#define SPI_FUNC_S		20
+#define SPI_RAW_M		BITFIELD_MASK(1)	/* Bit [19] 	- Read After Wr */
+#define SPI_RAW_S		19
+#define SPI_STUFF_M		BITFIELD_MASK(1)	/* Bit [18] 	- Stuff bit */
+#define SPI_STUFF_S		18
+#define SPI_BLKMODE_M		BITFIELD_MASK(1)	/* Bit [19] 	- Blockmode 1=blk */
+#define SPI_BLKMODE_S		19
+#define SPI_OPCODE_M		BITFIELD_MASK(1)	/* Bit [18] 	- OP Code */
+#define SPI_OPCODE_S		18
+#define SPI_ADDR_M		BITFIELD_MASK(17)	/* Bits [17:1] 	- Address */
+#define SPI_ADDR_S		1
+#define SPI_STUFF0_M		BITFIELD_MASK(1)	/* Bit [0] 	- Stuff bit */
+#define SPI_STUFF0_S		0
+
+#define SPI_RSP_START_M		BITFIELD_MASK(1)	/* Bit [7] 	- Start Bit (always 0) */
+#define SPI_RSP_START_S		7
+#define SPI_RSP_PARAM_ERR_M	BITFIELD_MASK(1)	/* Bit [6] 	- Parameter Error */
+#define SPI_RSP_PARAM_ERR_S	6
+#define SPI_RSP_RFU5_M		BITFIELD_MASK(1)	/* Bit [5] 	- RFU (Always 0) */
+#define SPI_RSP_RFU5_S		5
+#define SPI_RSP_FUNC_ERR_M	BITFIELD_MASK(1)	/* Bit [4] 	- Function number error */
+#define SPI_RSP_FUNC_ERR_S	4
+#define SPI_RSP_CRC_ERR_M	BITFIELD_MASK(1)	/* Bit [3] 	- COM CRC Error */
+#define SPI_RSP_CRC_ERR_S	3
+#define SPI_RSP_ILL_CMD_M	BITFIELD_MASK(1)	/* Bit [2] 	- Illegal Command error */
+#define SPI_RSP_ILL_CMD_S	2
+#define SPI_RSP_RFU1_M		BITFIELD_MASK(1)	/* Bit [1] 	- RFU (Always 0) */
+#define SPI_RSP_RFU1_S		1
+#define SPI_RSP_IDLE_M		BITFIELD_MASK(1)	/* Bit [0] 	- In idle state */
+#define SPI_RSP_IDLE_S		0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN	6	/* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK	0xFE	/* SD Start Block Token */
+#define SDSPI_IDLE_PAD		0xFF	/* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK	0x80
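Editor's example: the _M/_S pairs above are field-width masks and shift positions for a 32-bit SD-SPI command word. BITFIELD_MASK() is defined elsewhere in this driver; the sketch assumes the conventional ((1u << width) - 1) form, and SDSPI_FIELD() is a hypothetical helper invented here purely for illustration.

#include <stdint.h>

#ifndef BITFIELD_MASK
#define BITFIELD_MASK(width)	((1u << (width)) - 1)	/* assumed form */
#endif

#define SDSPI_FIELD(val, name)	(((uint32_t)(val) & name##_M) << name##_S)

static uint32_t sdspi_build_cmd(unsigned func, unsigned write,
                                unsigned cmd_index, uint32_t addr)
{
	return SDSPI_FIELD(1, SPI_START)             |	/* start bit */
	       SDSPI_FIELD(1, SPI_DIR)               |	/* host -> card */
	       SDSPI_FIELD(cmd_index, SPI_CMD_INDEX) |
	       SDSPI_FIELD(write, SPI_RW)            |
	       SDSPI_FIELD(func, SPI_FUNC)           |
	       SDSPI_FIELD(addr, SPI_ADDR);
}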
diff --git a/drivers/net/wireless/bcm4329/include/proto/vlan.h b/drivers/net/wireless/bcm4329/include/proto/vlan.h
new file mode 100644
index 0000000..670bc44
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/vlan.h
@@ -0,0 +1,63 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h,v 9.4.196.2 2008/12/07 21:19:20 Exp $
+ */
+
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define VLAN_VID_MASK		0xfff	
+#define	VLAN_CFI_SHIFT		12	
+#define VLAN_PRI_SHIFT		13	
+
+#define VLAN_PRI_MASK		7	
+
+#define	VLAN_TAG_LEN		4
+#define	VLAN_TAG_OFFSET		(2 * ETHER_ADDR_LEN)	
+
+#define VLAN_TPID		0x8100	
+
+struct ethervlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	vlan_type;		
+	uint16	vlan_tag;		
+	uint16	ether_type;
+};
+
+#define	ETHERVLAN_HDR_LEN	(ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+
+#include <packed_section_end.h>
+
+#endif 
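Editor's example: a sketch of peeking at the 802.1Q tag of a frame laid out as ethervlan_header. Both vlan_type and vlan_tag are big-endian on the wire; ntohs() stands in for the driver's byte-order helpers.

#include <stdbool.h>
#include <arpa/inet.h>

static bool vlan_peek(const struct ethervlan_header *evh,
                      unsigned *vid, unsigned *prio)
{
	if (ntohs(evh->vlan_type) != VLAN_TPID)
		return false;				/* not a tagged frame */

	unsigned tci = ntohs(evh->vlan_tag);
	*vid  = tci & VLAN_VID_MASK;			/* bits 0-11 */
	*prio = (tci >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;	/* bits 13-15 */
	return true;
}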
diff --git a/drivers/net/wireless/bcm4329/include/proto/wpa.h b/drivers/net/wireless/bcm4329/include/proto/wpa.h
new file mode 100644
index 0000000..f5d0cd5
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/proto/wpa.h
@@ -0,0 +1,159 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h,v 1.16.166.1.20.1 2008/11/20 00:51:31 Exp $
+ */
+
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+
+#include <packed_section_start.h>
+
+
+
+
+#define DOT11_RC_INVALID_WPA_IE		13	
+#define DOT11_RC_MIC_FAILURE		14	
+#define DOT11_RC_4WH_TIMEOUT		15	
+#define DOT11_RC_GTK_UPDATE_TIMEOUT	16	
+#define DOT11_RC_WPA_IE_MISMATCH	17	
+#define DOT11_RC_INVALID_MC_CIPHER	18	
+#define DOT11_RC_INVALID_UC_CIPHER	19	
+#define DOT11_RC_INVALID_AKMP		20	
+#define DOT11_RC_BAD_WPA_VERSION	21	
+#define DOT11_RC_INVALID_WPA_CAP	22	
+#define DOT11_RC_8021X_AUTH_FAIL	23	
+
+#define WPA2_PMKID_LEN	16
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 tag;	
+	uint8 length;	
+	uint8 oui[3];	
+	uint8 oui_type;	
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN	4
+#define WPA_IE_FIXED_LEN	8
+#define WPA_IE_TAG_FIXED_LEN	6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 tag;	
+	uint8 length;	
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN	4
+#define WPA_RSN_IE_TAG_FIXED_LEN	2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 oui[3];
+	uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN	4
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN	2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
+
+
+#define WPA_CIPHER_NONE		0	
+#define WPA_CIPHER_WEP_40	1	
+#define WPA_CIPHER_TKIP		2	
+#define WPA_CIPHER_AES_OCB	3	
+#define WPA_CIPHER_AES_CCM	4	
+#define WPA_CIPHER_WEP_104	5	
+
+#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
+				 (cipher) == WPA_CIPHER_WEP_40 || \
+				 (cipher) == WPA_CIPHER_WEP_104 || \
+				 (cipher) == WPA_CIPHER_TKIP || \
+				 (cipher) == WPA_CIPHER_AES_OCB || \
+				 (cipher) == WPA_CIPHER_AES_CCM)
+
+
+#define WPA_TKIP_CM_DETECT	60	
+#define WPA_TKIP_CM_BLOCK	60	
+
+
+#define RSN_CAP_LEN		2	
+
+
+#define RSN_CAP_PREAUTH			0x0001
+#define RSN_CAP_NOPAIRWISE		0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
+#define RSN_CAP_1_REPLAY_CNTR		0
+#define RSN_CAP_2_REPLAY_CNTRS		1
+#define RSN_CAP_4_REPLAY_CNTRS		2
+#define RSN_CAP_16_REPLAY_CNTRS		3
+
+
+#define WPA_CAP_4_REPLAY_CNTRS		RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS		RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+
+#define WPA_CAP_LEN	RSN_CAP_LEN	
+
+#define	WPA_CAP_WPA2_PREAUTH		RSN_CAP_PREAUTH
+
+
+
+#include <packed_section_end.h>
+
+#endif 
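Editor's example: the RSN capability field packs the PTK and GTK replay-counter configuration into 2-bit codes, where codes 0-3 mean 1, 2, 4 and 16 counters. A decoding sketch, assuming the usual little-endian byte order of 802.11 IE bodies:

#include <stdint.h>

static unsigned rsn_ptk_replay_counters(const uint8_t caps[RSN_CAP_LEN])
{
	uint16_t cap = (uint16_t)caps[0] | ((uint16_t)caps[1] << 8);	/* LE */
	unsigned code = (cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
			>> RSN_CAP_PTK_REPLAY_CNTR_SHIFT;

	/* 0/1/2/3 encode 1, 2, 4 and 16 replay counters respectively. */
	return 1u << (code == RSN_CAP_16_REPLAY_CNTRS ? 4 : code);
}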
diff --git a/drivers/net/wireless/bcm4329/include/sbchipc.h b/drivers/net/wireless/bcm4329/include/sbchipc.h
new file mode 100644
index 0000000..39e5c8d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbchipc.h
@@ -0,0 +1,1026 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer,
+ * gpio interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h,v 13.103.2.5.4.5.2.9 2009/07/03 14:23:21 Exp $
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+
+#ifndef	_SBCHIPC_H
+#define	_SBCHIPC_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	
+
+typedef volatile struct {
+	uint32	chipid;			
+	uint32	capabilities;
+	uint32	corecontrol;		
+	uint32	bist;
+
+	
+	uint32	otpstatus;		
+	uint32	otpcontrol;
+	uint32	otpprog;
+	uint32	PAD;
+
+	
+	uint32	intstatus;		
+	uint32	intmask;
+	uint32	chipcontrol;		
+	uint32	chipstatus;		
+
+	
+	uint32	jtagcmd;		
+	uint32	jtagir;
+	uint32	jtagdr;
+	uint32	jtagctrl;
+
+	
+	uint32	flashcontrol;		
+	uint32	flashaddress;
+	uint32	flashdata;
+	uint32	PAD[1];
+
+	
+	uint32	broadcastaddress;	
+	uint32	broadcastdata;
+
+	
+	uint32	gpiopullup;		
+	uint32	gpiopulldown;		
+	uint32	gpioin;			
+	uint32	gpioout;
+	uint32	gpioouten;
+	uint32	gpiocontrol;
+	uint32	gpiointpolarity;
+	uint32	gpiointmask;
+
+	
+	uint32	gpioevent;
+	uint32	gpioeventintmask;
+
+	
+	uint32	watchdog;		
+
+	
+	uint32	gpioeventintpolarity;
+
+	
+	uint32  gpiotimerval;		
+	uint32  gpiotimeroutmask;
+
+	
+	uint32	clockcontrol_n;		
+	uint32	clockcontrol_sb;	
+	uint32	clockcontrol_pci;	
+	uint32	clockcontrol_m2;	
+	uint32	clockcontrol_m3;	
+	uint32	clkdiv;			
+	uint32	PAD[2];
+
+	
+	uint32	pll_on_delay;		
+	uint32	fref_sel_delay;
+	uint32	slow_clk_ctl;		
+	uint32	PAD[1];
+
+	
+	uint32	system_clk_ctl;		
+	uint32	clkstatestretch;
+	uint32	PAD[13];
+
+	
+	uint32	eromptr;
+
+	
+	uint32	pcmcia_config;		
+	uint32	pcmcia_memwait;
+	uint32	pcmcia_attrwait;
+	uint32	pcmcia_iowait;
+	uint32	ide_config;
+	uint32	ide_memwait;
+	uint32	ide_attrwait;
+	uint32	ide_iowait;
+	uint32	prog_config;
+	uint32	prog_waitcount;
+	uint32	flash_config;
+	uint32	flash_waitcount;
+	uint32	PAD[4];
+	uint32	PAD[40];
+
+
+	
+	uint32	clk_ctl_st;		
+	uint32	hw_war;
+	uint32	PAD[70];
+
+	
+	uint8	uart0data;		
+	uint8	uart0imr;
+	uint8	uart0fcr;
+	uint8	uart0lcr;
+	uint8	uart0mcr;
+	uint8	uart0lsr;
+	uint8	uart0msr;
+	uint8	uart0scratch;
+	uint8	PAD[248];		
+
+	uint8	uart1data;		
+	uint8	uart1imr;
+	uint8	uart1fcr;
+	uint8	uart1lcr;
+	uint8	uart1mcr;
+	uint8	uart1lsr;
+	uint8	uart1msr;
+	uint8	uart1scratch;
+	uint32	PAD[126];
+
+	
+	uint32	pmucontrol;		
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		
+	uint32	gpioenable;		
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	PAD;
+	uint32	chipcontrol_addr;	
+	uint32	chipcontrol_data;	
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	PAD[102];
+	uint16	otp[768];
+} chipcregs_t;
+
+#endif 
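Editor's example: the PAD macro above pastes the current __LINE__ into the member name, so every `uint32 PAD;` inside chipcregs_t declares a uniquely named reserved slot while keeping the register offsets fixed. A standalone illustration of the same trick, with DEMO_* names invented here to avoid clashing with the real macro:

#define DEMO_PADLINE(line)	demo_pad ## line
#define DEMO_XSTR(line)		DEMO_PADLINE(line)
#define DEMO_PAD		DEMO_XSTR(__LINE__)

struct demo_regs {
	unsigned int ctrl;
	unsigned int DEMO_PAD;	/* expands to demo_pad<this line number> */
	unsigned int status;
	unsigned int DEMO_PAD;	/* different line, so a different member name */
};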
+
+#define	CC_CHIPID		0
+#define	CC_CAPABILITIES		4
+#define CC_OTPST		0x10
+#define CC_CHIPST		0x2c
+#define	CC_JTAGCMD		0x30
+#define	CC_JTAGIR		0x34
+#define	CC_JTAGDR		0x38
+#define	CC_JTAGCTRL		0x3c
+#define	CC_WATCHDOG		0x80
+#define	CC_CLKC_N		0x90
+#define	CC_CLKC_M0		0x94
+#define	CC_CLKC_M1		0x98
+#define	CC_CLKC_M2		0x9c
+#define	CC_CLKC_M3		0xa0
+#define	CC_CLKDIV		0xa4
+#define	CC_SYS_CLK_CTL		0xc0
+#define	CC_CLK_CTL_ST		SI_CLK_CTL_ST
+#define	CC_EROMPTR		0xfc
+#define	PMU_CTL			0x600
+#define	PMU_CAP			0x604
+#define	PMU_ST			0x608
+#define PMU_RES_STATE		0x60c
+#define PMU_TIMER		0x614
+#define	PMU_MIN_RES_MASK	0x618
+#define	PMU_MAX_RES_MASK	0x61c
+#define PMU_REG_CONTROL_ADDR	0x658
+#define PMU_REG_CONTROL_DATA	0x65C
+#define PMU_PLL_CONTROL_ADDR 	0x660
+#define PMU_PLL_CONTROL_DATA 	0x664
+#define	CC_OTP			0x800		
+
+
+#define	CID_ID_MASK		0x0000ffff	
+#define	CID_REV_MASK		0x000f0000	
+#define	CID_REV_SHIFT		16		
+#define	CID_PKG_MASK		0x00f00000	
+#define	CID_PKG_SHIFT		20		
+#define	CID_CC_MASK		0x0f000000	
+#define CID_CC_SHIFT		24
+#define	CID_TYPE_MASK		0xf0000000	
+#define CID_TYPE_SHIFT		28
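Editor's example: a sketch of splitting the chipcommon chipid register (chipcregs_t.chipid) into its fields with the CID_* masks above; the field names follow the conventional Broadcom interpretation (chip number, revision, package option, core count, chip type).

#include <stdint.h>

struct chip_ident {
	unsigned chip;		/* chip number */
	unsigned rev;		/* chip revision */
	unsigned pkg;		/* package option */
	unsigned cc;		/* core count */
	unsigned type;		/* chip type */
};

static struct chip_ident decode_chipid(uint32_t chipid)
{
	struct chip_ident id;

	id.chip = chipid & CID_ID_MASK;
	id.rev  = (chipid & CID_REV_MASK)  >> CID_REV_SHIFT;
	id.pkg  = (chipid & CID_PKG_MASK)  >> CID_PKG_SHIFT;
	id.cc   = (chipid & CID_CC_MASK)   >> CID_CC_SHIFT;
	id.type = (chipid & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
	return id;
}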
+
+
+#define	CC_CAP_UARTS_MASK	0x00000003	
+#define CC_CAP_MIPSEB		0x00000004	
+#define CC_CAP_UCLKSEL		0x00000018	
+#define CC_CAP_UINTCLK		0x00000008	
+#define CC_CAP_UARTGPIO		0x00000020	
+#define CC_CAP_EXTBUS_MASK	0x000000c0	
+#define CC_CAP_EXTBUS_NONE	0x00000000	
+#define CC_CAP_EXTBUS_FULL	0x00000040	
+#define CC_CAP_EXTBUS_PROG	0x00000080	
+#define	CC_CAP_FLASH_MASK	0x00000700	
+#define	CC_CAP_PLL_MASK		0x00038000	
+#define CC_CAP_PWR_CTL		0x00040000	
+#define CC_CAP_OTPSIZE		0x00380000	
+#define CC_CAP_OTPSIZE_SHIFT	19		
+#define CC_CAP_OTPSIZE_BASE	5		
+#define CC_CAP_JTAGP		0x00400000	
+#define CC_CAP_ROM		0x00800000	
+#define CC_CAP_BKPLN64		0x08000000	
+#define	CC_CAP_PMU		0x10000000	
+#define	CC_CAP_ECI		0x20000000	
+
+
+#define PLL_NONE		0x00000000
+#define PLL_TYPE1		0x00010000	
+#define PLL_TYPE2		0x00020000	
+#define PLL_TYPE3		0x00030000	
+#define PLL_TYPE4		0x00008000	
+#define PLL_TYPE5		0x00018000	
+#define PLL_TYPE6		0x00028000	
+#define PLL_TYPE7		0x00038000	
+
+
+#define	ILP_CLOCK		32000
+
+
+#define	ALP_CLOCK		20000000
+
+
+#define	HT_CLOCK		80000000
+
+
+#define CC_UARTCLKO		0x00000001	
+#define	CC_SE			0x00000002	
+#define CC_UARTCLKEN		0x00000008	
+
+
+#define CHIPCTRL_4321A0_DEFAULT	0x3a4
+#define CHIPCTRL_4321A1_DEFAULT	0x0a4
+#define CHIPCTRL_4321_PLL_DOWN	0x800000	
+
+
+#define OTPS_OL_MASK		0x000000ff
+#define OTPS_OL_MFG		0x00000001	
+#define OTPS_OL_OR1		0x00000002	
+#define OTPS_OL_OR2		0x00000004	
+#define OTPS_OL_GU		0x00000008	
+#define OTPS_GUP_MASK		0x00000f00
+#define OTPS_GUP_SHIFT		8
+#define OTPS_GUP_HW		0x00000100	
+#define OTPS_GUP_SW		0x00000200	
+#define OTPS_GUP_CI		0x00000400	
+#define OTPS_GUP_FUSE		0x00000800	
+#define OTPS_READY		0x00001000
+#define OTPS_RV(x)		(1 << (16 + (x)))	
+#define OTPS_RV_MASK		0x0fff0000
+
+
+#define OTPC_PROGSEL		0x00000001
+#define OTPC_PCOUNT_MASK	0x0000000e
+#define OTPC_PCOUNT_SHIFT	1
+#define OTPC_VSEL_MASK		0x000000f0
+#define OTPC_VSEL_SHIFT		4
+#define OTPC_TMM_MASK		0x00000700
+#define OTPC_TMM_SHIFT		8
+#define OTPC_ODM		0x00000800
+#define OTPC_PROGEN		0x80000000
+
+
+#define OTPP_COL_MASK		0x000000ff
+#define OTPP_COL_SHIFT		0
+#define OTPP_ROW_MASK		0x0000ff00
+#define OTPP_ROW_SHIFT		8
+#define OTPP_OC_MASK		0x0f000000
+#define OTPP_OC_SHIFT		24
+#define OTPP_READERR		0x10000000
+#define OTPP_VALUE_MASK		0x20000000
+#define OTPP_VALUE_SHIFT	29
+#define OTPP_START_BUSY		0x80000000
+
+
+#define OTPPOC_READ		0
+#define OTPPOC_BIT_PROG		1
+#define OTPPOC_VERIFY		3
+#define OTPPOC_INIT		4
+#define OTPPOC_SET		5
+#define OTPPOC_RESET		6
+#define OTPPOC_OCST		7
+#define OTPPOC_ROW_LOCK		8
+#define OTPPOC_PRESCN_TEST	9
+
+
+#define JCMD_START		0x80000000
+#define JCMD_BUSY		0x80000000
+#define JCMD_STATE_MASK		0x60000000
+#define JCMD_STATE_TLR		0x00000000	
+#define JCMD_STATE_PIR		0x20000000	
+#define JCMD_STATE_PDR		0x40000000	
+#define JCMD_STATE_RTI		0x60000000	
+#define JCMD0_ACC_MASK		0x0000f000
+#define JCMD0_ACC_IRDR		0x00000000
+#define JCMD0_ACC_DR		0x00001000
+#define JCMD0_ACC_IR		0x00002000
+#define JCMD0_ACC_RESET		0x00003000
+#define JCMD0_ACC_IRPDR		0x00004000
+#define JCMD0_ACC_PDR		0x00005000
+#define JCMD0_IRW_MASK		0x00000f00
+#define JCMD_ACC_MASK		0x000f0000	
+#define JCMD_ACC_IRDR		0x00000000
+#define JCMD_ACC_DR		0x00010000
+#define JCMD_ACC_IR		0x00020000
+#define JCMD_ACC_RESET		0x00030000
+#define JCMD_ACC_IRPDR		0x00040000
+#define JCMD_ACC_PDR		0x00050000
+#define JCMD_ACC_PIR		0x00060000
+#define JCMD_ACC_IRDR_I		0x00070000	
+#define JCMD_ACC_DR_I		0x00080000	
+#define JCMD_IRW_MASK		0x00001f00
+#define JCMD_IRW_SHIFT		8
+#define JCMD_DRW_MASK		0x0000003f
+
+
+#define JCTRL_FORCE_CLK		4		
+#define JCTRL_EXT_EN		2		
+#define JCTRL_EN		1		
+
+
+#define	CLKD_SFLASH		0x0f000000
+#define	CLKD_SFLASH_SHIFT	24
+#define	CLKD_OTP		0x000f0000
+#define	CLKD_OTP_SHIFT		16
+#define	CLKD_JTAG		0x00000f00
+#define	CLKD_JTAG_SHIFT		8
+#define	CLKD_UART		0x000000ff
+
+
+#define	CI_GPIO			0x00000001	
+#define	CI_EI			0x00000002	
+#define	CI_TEMP			0x00000004	
+#define	CI_SIRQ			0x00000008	
+#define	CI_ECI			0x00000010	
+#define	CI_PMU			0x00000020	
+#define	CI_UART			0x00000040	
+#define	CI_WDRESET		0x80000000	
+
+
+#define SCC_SS_MASK		0x00000007	
+#define	SCC_SS_LPO		0x00000000	
+#define	SCC_SS_XTAL		0x00000001	
+#define	SCC_SS_PCI		0x00000002	
+#define SCC_LF			0x00000200	
+#define SCC_LP			0x00000400	
+#define SCC_FS			0x00000800	
+#define SCC_IP			0x00001000	
+#define SCC_XC			0x00002000	
+#define SCC_XP			0x00004000	
+#define SCC_CD_MASK		0xffff0000	
+#define SCC_CD_SHIFT		16
+
+
+#define	SYCC_IE			0x00000001	
+#define	SYCC_AE			0x00000002	
+#define	SYCC_FP			0x00000004	
+#define	SYCC_AR			0x00000008	
+#define	SYCC_HR			0x00000010	
+#define SYCC_CD_MASK		0xffff0000	
+#define SYCC_CD_SHIFT		16
+
+
+#define	CF_EN			0x00000001	
+#define	CF_EM_MASK		0x0000000e	
+#define	CF_EM_SHIFT		1
+#define	CF_EM_FLASH		0		
+#define	CF_EM_SYNC		2		
+#define	CF_EM_PCMCIA		4		
+#define	CF_DS			0x00000010	
+#define	CF_BS			0x00000020	
+#define	CF_CD_MASK		0x000000c0	
+#define	CF_CD_SHIFT		6
+#define	CF_CD_DIV2		0x00000000	
+#define	CF_CD_DIV3		0x00000040	
+#define	CF_CD_DIV4		0x00000080	
+#define	CF_CE			0x00000100	
+#define	CF_SB			0x00000200	
+
+
+#define	PM_W0_MASK		0x0000003f	
+#define	PM_W1_MASK		0x00001f00	
+#define	PM_W1_SHIFT		8
+#define	PM_W2_MASK		0x001f0000	
+#define	PM_W2_SHIFT		16
+#define	PM_W3_MASK		0x1f000000	
+#define	PM_W3_SHIFT		24
+
+
+#define	PA_W0_MASK		0x0000003f	
+#define	PA_W1_MASK		0x00001f00	
+#define	PA_W1_SHIFT		8
+#define	PA_W2_MASK		0x001f0000	
+#define	PA_W2_SHIFT		16
+#define	PA_W3_MASK		0x1f000000	
+#define	PA_W3_SHIFT		24
+
+
+#define	PI_W0_MASK		0x0000003f	
+#define	PI_W1_MASK		0x00001f00	
+#define	PI_W1_SHIFT		8
+#define	PI_W2_MASK		0x001f0000	
+#define	PI_W2_SHIFT		16
+#define	PI_W3_MASK		0x1f000000	
+#define	PI_W3_SHIFT		24
+
+
+#define	PW_W0_MASK		0x0000001f	
+#define	PW_W1_MASK		0x00001f00	
+#define	PW_W1_SHIFT		8
+#define	PW_W2_MASK		0x001f0000	
+#define	PW_W2_SHIFT		16
+#define	PW_W3_MASK		0x1f000000	
+#define	PW_W3_SHIFT		24
+
+#define PW_W0       		0x0000000c
+#define PW_W1       		0x00000a00
+#define PW_W2       		0x00020000
+#define PW_W3       		0x01000000
+
+
+#define	FW_W0_MASK		0x0000003f	
+#define	FW_W1_MASK		0x00001f00	
+#define	FW_W1_SHIFT		8
+#define	FW_W2_MASK		0x001f0000	
+#define	FW_W2_SHIFT		16
+#define	FW_W3_MASK		0x1f000000	
+#define	FW_W3_SHIFT		24
+
+
+#define WATCHDOG_CLOCK		48000000	
+#define WATCHDOG_CLOCK_5354 	32000		
+
+
+#define	PCTL_ILP_DIV_MASK	0xffff0000
+#define	PCTL_ILP_DIV_SHIFT	16
+#define PCTL_PLL_PLLCTL_UPD	0x00000400	
+#define PCTL_NOILP_ON_WAIT	0x00000200	
+#define	PCTL_HT_REQ_EN		0x00000100
+#define	PCTL_ALP_REQ_EN		0x00000080
+#define	PCTL_XTALFREQ_MASK	0x0000007c
+#define	PCTL_XTALFREQ_SHIFT	2
+#define	PCTL_ILP_DIV_EN		0x00000002
+#define	PCTL_LPO_SEL		0x00000001
+
+
+#define CSTRETCH_HT		0xffff0000
+#define CSTRETCH_ALP		0x0000ffff
+
+
+#define GPIO_ONTIME_SHIFT	16
+
+
+#define	CN_N1_MASK		0x3f		
+#define	CN_N2_MASK		0x3f00		
+#define	CN_N2_SHIFT		8
+#define	CN_PLLC_MASK		0xf0000		
+#define	CN_PLLC_SHIFT		16
+
+
+#define	CC_M1_MASK		0x3f		
+#define	CC_M2_MASK		0x3f00		
+#define	CC_M2_SHIFT		8
+#define	CC_M3_MASK		0x3f0000	
+#define	CC_M3_SHIFT		16
+#define	CC_MC_MASK		0x1f000000	
+#define	CC_MC_SHIFT		24
+
+
+#define	CC_F6_2			0x02		
+#define	CC_F6_3			0x03		
+#define	CC_F6_4			0x05		
+#define	CC_F6_5			0x09
+#define	CC_F6_6			0x11
+#define	CC_F6_7			0x21
+
+#define	CC_F5_BIAS		5		
+
+#define	CC_MC_BYPASS		0x08
+#define	CC_MC_M1		0x04
+#define	CC_MC_M1M2		0x02
+#define	CC_MC_M1M2M3		0x01
+#define	CC_MC_M1M3		0x11
+
+
+#define	CC_T2_BIAS		2		
+#define	CC_T2M2_BIAS		3		
+
+#define	CC_T2MC_M1BYP		1
+#define	CC_T2MC_M2BYP		2
+#define	CC_T2MC_M3BYP		4
+
+
+#define	CC_T6_MMASK		1		
+#define	CC_T6_M0		120000000	
+#define	CC_T6_M1		100000000	
+#define	SB2MIPS_T6(sb)		(2 * (sb))
+
+
+#define	CC_CLOCK_BASE1		24000000	
+#define CC_CLOCK_BASE2		12500000	
+
+
+#define	CLKC_5350_N		0x0311
+#define	CLKC_5350_M		0x04020009
+
+
+#define FLASH_NONE		0x000		
+#define SFLASH_ST		0x100		
+#define SFLASH_AT		0x200		
+#define	PFLASH			0x700		
+
+
+#define	CC_CFG_EN		0x0001		
+#define	CC_CFG_EM_MASK		0x000e		
+#define	CC_CFG_EM_ASYNC		0x0000		
+#define	CC_CFG_EM_SYNC		0x0002		
+#define	CC_CFG_EM_PCMCIA	0x0004		
+#define	CC_CFG_EM_IDE		0x0006		
+#define	CC_CFG_DS		0x0010		
+#define	CC_CFG_CD_MASK		0x00e0		
+#define	CC_CFG_CE		0x0100		
+#define	CC_CFG_SB		0x0200		
+#define	CC_CFG_IS		0x0400		
+
+
+#define	CC_EB_BASE		0x1a000000	
+#define	CC_EB_PCMCIA_MEM	0x1a000000	
+#define	CC_EB_PCMCIA_IO		0x1a200000	
+#define	CC_EB_PCMCIA_CFG	0x1a400000	
+#define	CC_EB_IDE		0x1a800000	
+#define	CC_EB_PCMCIA1_MEM	0x1a800000	
+#define	CC_EB_PCMCIA1_IO	0x1aa00000	
+#define	CC_EB_PCMCIA1_CFG	0x1ac00000	
+#define	CC_EB_PROGIF		0x1b000000	
+
+
+
+#define SFLASH_OPCODE		0x000000ff
+#define SFLASH_ACTION		0x00000700
+#define	SFLASH_CS_ACTIVE	0x00001000	
+#define SFLASH_START		0x80000000
+#define SFLASH_BUSY		SFLASH_START
+
+
+#define	SFLASH_ACT_OPONLY	0x0000		
+#define	SFLASH_ACT_OP1D		0x0100		
+#define	SFLASH_ACT_OP3A		0x0200		
+#define	SFLASH_ACT_OP3A1D	0x0300		
+#define	SFLASH_ACT_OP3A4D	0x0400		
+#define	SFLASH_ACT_OP3A4X4D	0x0500		
+#define	SFLASH_ACT_OP3A1X4D	0x0700		
+
+
+#define SFLASH_ST_WREN		0x0006		
+#define SFLASH_ST_WRDIS		0x0004		
+#define SFLASH_ST_RDSR		0x0105		
+#define SFLASH_ST_WRSR		0x0101		
+#define SFLASH_ST_READ		0x0303		
+#define SFLASH_ST_PP		0x0302		
+#define SFLASH_ST_SE		0x02d8		
+#define SFLASH_ST_BE		0x00c7		
+#define SFLASH_ST_DP		0x00b9		
+#define SFLASH_ST_RES		0x03ab		
+#define SFLASH_ST_CSA		0x1000		
+
+
+#define SFLASH_ST_WIP		0x01		
+#define SFLASH_ST_WEL		0x02		
+#define SFLASH_ST_BP_MASK	0x1c		
+#define SFLASH_ST_BP_SHIFT	2
+#define SFLASH_ST_SRWD		0x80		
+
+
+#define SFLASH_AT_READ				0x07e8
+#define SFLASH_AT_PAGE_READ			0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS			0x01d7
+#define SFLASH_AT_BUF1_WRITE			0x0384
+#define SFLASH_AT_BUF2_WRITE			0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM		0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM		0x0286
+#define SFLASH_AT_BUF1_PROGRAM			0x0288
+#define SFLASH_AT_BUF2_PROGRAM			0x0289
+#define SFLASH_AT_PAGE_ERASE			0x0281
+#define SFLASH_AT_BLOCK_ERASE			0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM	0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM	0x0385
+#define SFLASH_AT_BUF1_LOAD			0x0253
+#define SFLASH_AT_BUF2_LOAD			0x0255
+#define SFLASH_AT_BUF1_COMPARE			0x0260
+#define SFLASH_AT_BUF2_COMPARE			0x0261
+#define SFLASH_AT_BUF1_REPROGRAM		0x0258
+#define SFLASH_AT_BUF2_REPROGRAM		0x0259
+
+
+#define SFLASH_AT_READY				0x80
+#define SFLASH_AT_MISMATCH			0x40
+#define SFLASH_AT_ID_MASK			0x38
+#define SFLASH_AT_ID_SHIFT			3
+
+
+
+#define UART_RX		0	
+#define UART_TX		0	
+#define UART_DLL	0	
+#define UART_IER	1	
+#define UART_DLM	1	
+#define UART_IIR	2	
+#define UART_FCR	2	
+#define UART_LCR	3	
+#define UART_MCR	4	
+#define UART_LSR	5	
+#define UART_MSR	6	
+#define UART_SCR	7	
+#define UART_LCR_DLAB	0x80	
+#define UART_LCR_WLEN8	0x03	
+#define UART_MCR_OUT2	0x08	
+#define UART_MCR_LOOP	0x10	
+#define UART_LSR_RX_FIFO 	0x80	
+#define UART_LSR_TDHR		0x40	
+#define UART_LSR_THRE		0x20	
+#define UART_LSR_BREAK		0x10	
+#define UART_LSR_FRAMING	0x08	
+#define UART_LSR_PARITY		0x04	
+#define UART_LSR_OVERRUN	0x02	
+#define UART_LSR_RXRDY		0x01	
+#define UART_FCR_FIFO_ENABLE 1	
+
+
+#define UART_IIR_FIFO_MASK	0xc0	
+#define UART_IIR_INT_MASK	0xf	
+#define UART_IIR_MDM_CHG	0x0	
+#define UART_IIR_NOINT		0x1	
+#define UART_IIR_THRE		0x2	
+#define UART_IIR_RCVD_DATA	0x4	
+#define UART_IIR_RCVR_STATUS 	0x6	
+#define UART_IIR_CHAR_TIME 	0xc	
+
+
+#define UART_IER_EDSSI	8	
+#define UART_IER_ELSI	4	
+#define UART_IER_ETBEI  2	
+#define UART_IER_ERBFI	1	
+
+
+#define	PST_INTPEND	0x0040
+#define	PST_SBCLKST	0x0030
+#define	PST_SBCLKST_ILP	0x0010
+#define	PST_SBCLKST_ALP	0x0020
+#define	PST_SBCLKST_HT	0x0030
+#define	PST_ALPAVAIL	0x0008
+#define	PST_HTAVAIL	0x0004
+#define	PST_RESINIT	0x0003
+
+
+#define PCAP_REV_MASK	0x000000ff
+#define PCAP_RC_MASK	0x00001f00
+#define PCAP_RC_SHIFT	8
+#define PCAP_TC_MASK	0x0001e000
+#define PCAP_TC_SHIFT	13
+#define PCAP_PC_MASK	0x001e0000
+#define PCAP_PC_SHIFT	17
+#define PCAP_VC_MASK	0x01e00000
+#define PCAP_VC_SHIFT	21
+#define PCAP_CC_MASK	0x1e000000
+#define PCAP_CC_SHIFT	25
+#define PCAP5_PC_MASK	0x003e0000	
+#define PCAP5_PC_SHIFT	17
+#define PCAP5_VC_MASK	0x07c00000
+#define PCAP5_VC_SHIFT	22
+#define PCAP5_CC_MASK	0xf8000000
+#define PCAP5_CC_SHIFT	27
+
+
+
+#define	PRRT_TIME_MASK	0x03ff
+#define	PRRT_INTEN	0x0400
+#define	PRRT_REQ_ACTIVE	0x0800
+#define	PRRT_ALP_REQ	0x1000
+#define	PRRT_HT_REQ	0x2000
+
+
+#define PMURES_BIT(bit)	(1 << (bit))
+
+
+#define PMURES_MAX_RESNUM	30
+
+
+
+
+#define	PMU0_PLL0_PLLCTL0		0
+#define	PMU0_PLL0_PC0_PDIV_MASK		1
+#define	PMU0_PLL0_PC0_PDIV_FREQ		25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK	0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT	3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE	8
+
+
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ	0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ	1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ	2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ	3 
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ	4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ	5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ	6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ	7
+
+
+#define	PMU0_PLL0_PLLCTL1		1
+#define	PMU0_PLL0_PC1_WILD_INT_MASK	0xf0000000
+#define	PMU0_PLL0_PC1_WILD_INT_SHIFT	28
+#define	PMU0_PLL0_PC1_WILD_FRAC_MASK	0x0fffff00
+#define	PMU0_PLL0_PC1_WILD_FRAC_SHIFT	8
+#define	PMU0_PLL0_PC1_STOP_MOD		0x00000040
+
+
+#define	PMU0_PLL0_PLLCTL2		2
+#define	PMU0_PLL0_PC2_WILD_INT_MASK	0xf
+#define	PMU0_PLL0_PC2_WILD_INT_SHIFT	4
+
+
+#define RES4328_EXT_SWITCHER_PWM	0	
+#define RES4328_BB_SWITCHER_PWM		1	
+#define RES4328_BB_SWITCHER_BURST	2	
+#define RES4328_BB_EXT_SWITCHER_BURST	3	
+#define RES4328_ILP_REQUEST		4	
+#define RES4328_RADIO_SWITCHER_PWM	5	
+#define RES4328_RADIO_SWITCHER_BURST	6	
+#define RES4328_ROM_SWITCH		7	
+#define RES4328_PA_REF_LDO		8	
+#define RES4328_RADIO_LDO		9	
+#define RES4328_AFE_LDO			10	
+#define RES4328_PLL_LDO			11	
+#define RES4328_BG_FILTBYP		12	
+#define RES4328_TX_FILTBYP		13	
+#define RES4328_RX_FILTBYP		14	
+#define RES4328_XTAL_PU			15	
+#define RES4328_XTAL_EN			16	
+#define RES4328_BB_PLL_FILTBYP		17	
+#define RES4328_RF_PLL_FILTBYP		18	
+#define RES4328_BB_PLL_PU		19	
+
+#define RES5354_EXT_SWITCHER_PWM	0	
+#define RES5354_BB_SWITCHER_PWM		1	
+#define RES5354_BB_SWITCHER_BURST	2	
+#define RES5354_BB_EXT_SWITCHER_BURST	3	
+#define RES5354_ILP_REQUEST		4	
+#define RES5354_RADIO_SWITCHER_PWM	5	
+#define RES5354_RADIO_SWITCHER_BURST	6	
+#define RES5354_ROM_SWITCH		7	
+#define RES5354_PA_REF_LDO		8	
+#define RES5354_RADIO_LDO		9	
+#define RES5354_AFE_LDO			10	
+#define RES5354_PLL_LDO			11	
+#define RES5354_BG_FILTBYP		12	
+#define RES5354_TX_FILTBYP		13	
+#define RES5354_RX_FILTBYP		14	
+#define RES5354_XTAL_PU			15	
+#define RES5354_XTAL_EN			16	
+#define RES5354_BB_PLL_FILTBYP		17	
+#define RES5354_RF_PLL_FILTBYP		18	
+#define RES5354_BB_PLL_PU		19	
+
+
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL  (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+
+#define PMU2_PHY_PLL_PLLCTL		4
+#define PMU2_SI_PLL_PLLCTL		10
+
+
+#define RES4325_BUCK_BOOST_BURST	0	
+#define RES4325_CBUCK_BURST		1	
+#define RES4325_CBUCK_PWM		2	
+#define RES4325_CLDO_CBUCK_BURST	3	
+#define RES4325_CLDO_CBUCK_PWM		4	
+#define RES4325_BUCK_BOOST_PWM		5	
+#define RES4325_ILP_REQUEST		6	
+#define RES4325_ABUCK_BURST		7	
+#define RES4325_ABUCK_PWM		8	
+#define RES4325_LNLDO1_PU		9	
+#define RES4325_OTP_PU			10	
+#define RES4325_LNLDO3_PU		11	
+#define RES4325_LNLDO4_PU		12	
+#define RES4325_XTAL_PU			13	
+#define RES4325_ALP_AVAIL		14	
+#define RES4325_RX_PWRSW_PU		15	
+#define RES4325_TX_PWRSW_PU		16	
+#define RES4325_RFPLL_PWRSW_PU		17	
+#define RES4325_LOGEN_PWRSW_PU		18	
+#define RES4325_AFE_PWRSW_PU		19	
+#define RES4325_BBPLL_PWRSW_PU		20	
+#define RES4325_HT_AVAIL		21	
+
+
+#define RES4325B0_CBUCK_LPOM		1	
+#define RES4325B0_CBUCK_BURST		2	
+#define RES4325B0_CBUCK_PWM		3	
+#define RES4325B0_CLDO_PU		4	
+
+
+#define RES4325C1_OTP_PWRSW_PU		10	
+#define RES4325C1_LNLDO2_PU		12	
+
+
+#define CST4325_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4325_DEFCIS_SEL		0	
+#define CST4325_SPROM_SEL		1	
+#define CST4325_OTP_SEL			2	
+#define CST4325_OTP_PWRDN		3	
+#define CST4325_SDIO_USB_MODE_MASK	0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT	2
+#define CST4325_RCAL_VALID_MASK		0x00000008
+#define CST4325_RCAL_VALID_SHIFT	3
+#define CST4325_RCAL_VALUE_MASK		0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT	4
+#define CST4325_PMUTOP_2B_MASK 		0x00000200	
+#define CST4325_PMUTOP_2B_SHIFT   	9
+
+#define RES4329_RESERVED0		0	
+#define RES4329_CBUCK_LPOM		1	
+#define RES4329_CBUCK_BURST		2	
+#define RES4329_CBUCK_PWM		3	
+#define RES4329_CLDO_PU			4	
+#define RES4329_PALDO_PU		5	
+#define RES4329_ILP_REQUEST		6	
+#define RES4329_RESERVED7		7	
+#define RES4329_RESERVED8		8	
+#define RES4329_LNLDO1_PU		9	
+#define RES4329_OTP_PU			10	
+#define RES4329_RESERVED11		11	
+#define RES4329_LNLDO2_PU		12	
+#define RES4329_XTAL_PU			13	
+#define RES4329_ALP_AVAIL		14	
+#define RES4329_RX_PWRSW_PU		15	
+#define RES4329_TX_PWRSW_PU		16	
+#define RES4329_RFPLL_PWRSW_PU		17	
+#define RES4329_LOGEN_PWRSW_PU		18	
+#define RES4329_AFE_PWRSW_PU		19	
+#define RES4329_BBPLL_PWRSW_PU		20	
+#define RES4329_HT_AVAIL		21	
+
+#define CST4329_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4329_DEFCIS_SEL		0	
+#define CST4329_SPROM_SEL		1	
+#define CST4329_OTP_SEL			2	
+#define CST4329_OTP_PWRDN		3	
+#define CST4329_SPI_SDIO_MODE_MASK	0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT	2
+
+
+#define RES4312_SWITCHER_BURST		0	
+#define RES4312_SWITCHER_PWM    	1	
+#define RES4312_PA_REF_LDO		2	
+#define RES4312_CORE_LDO_BURST		3	
+#define RES4312_CORE_LDO_PWM		4	
+#define RES4312_RADIO_LDO		5	
+#define RES4312_ILP_REQUEST		6	
+#define RES4312_BG_FILTBYP		7	
+#define RES4312_TX_FILTBYP		8	
+#define RES4312_RX_FILTBYP		9	
+#define RES4312_XTAL_PU			10	
+#define RES4312_ALP_AVAIL		11	
+#define RES4312_BB_PLL_FILTBYP		12	
+#define RES4312_RF_PLL_FILTBYP		13	
+#define RES4312_HT_AVAIL		14	
+
+#define RES4322_RF_LDO			0
+#define RES4322_ILP_REQUEST		1
+#define RES4322_XTAL_PU			2
+#define RES4322_ALP_AVAIL		3
+#define RES4322_SI_PLL_ON		4
+#define RES4322_HT_SI_AVAIL		5
+#define RES4322_PHY_PLL_ON		6
+#define RES4322_HT_PHY_AVAIL		7
+#define RES4322_OTP_PU			8
+
+
+#define CST4322_XTAL_FREQ_20_40MHZ	0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK	0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT	6
+#define CST4322_NO_SPROM_OTP		0	
+#define CST4322_SPROM_PRESENT		1	
+#define CST4322_OTP_PRESENT		2	
+#define CST4322_PCI_OR_USB		0x00000100
+#define CST4322_BOOT_MASK		0x00000600
+#define CST4322_BOOT_SHIFT		9
+#define CST4322_BOOT_FROM_SRAM		0	
+#define CST4322_BOOT_FROM_ROM		1	
+#define CST4322_BOOT_FROM_FLASH		2	
+#define CST4322_BOOT_FROM_INVALID	3
+#define CST4322_ILP_DIV_EN		0x00000800
+#define CST4322_FLASH_TYPE_MASK		0x00001000
+#define CST4322_FLASH_TYPE_SHIFT	12
+#define CST4322_FLASH_TYPE_SHIFT_ST	0	
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL	1	
+#define CST4322_ARM_TAP_SEL		0x00002000
+#define CST4322_RES_INIT_MODE_MASK	0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT	14
+#define CST4322_RES_INIT_MODE_ILPAVAIL	0	
+#define CST4322_RES_INIT_MODE_ILPREQ	1	
+#define CST4322_RES_INIT_MODE_ALPAVAIL	2	
+#define CST4322_RES_INIT_MODE_HTAVAIL	3	
+#define CST4322_PCIPLLCLK_GATING	0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP	0x00020000
+#define CST4322_PCI_CARDBUS_MODE	0x00040000
+
+#define RES4315_CBUCK_LPOM		1	
+#define RES4315_CBUCK_BURST		2	
+#define RES4315_CBUCK_PWM		3	
+#define RES4315_CLDO_PU			4	
+#define RES4315_PALDO_PU		5	
+#define RES4315_ILP_REQUEST		6	
+#define RES4315_LNLDO1_PU		9	
+#define RES4315_OTP_PU			10	
+#define RES4315_LNLDO2_PU		12	
+#define RES4315_XTAL_PU			13	
+#define RES4315_ALP_AVAIL		14	
+#define RES4315_RX_PWRSW_PU		15	
+#define RES4315_TX_PWRSW_PU		16	
+#define RES4315_RFPLL_PWRSW_PU		17	
+#define RES4315_LOGEN_PWRSW_PU		18	
+#define RES4315_AFE_PWRSW_PU		19	
+#define RES4315_BBPLL_PWRSW_PU		20	
+#define RES4315_HT_AVAIL		21	
+
+#define CST4315_SPROM_OTP_SEL_MASK	0x00000003	
+#define CST4315_DEFCIS_SEL		0x00000000	
+#define CST4315_SPROM_SEL		0x00000001	
+#define CST4315_OTP_SEL			0x00000002	
+#define CST4315_OTP_PWRDN		0x00000003	
+#define CST4315_SDIO_MODE		0x00000004	
+#define CST4315_RCAL_VALID		0x00000008
+#define CST4315_RCAL_VALUE_MASK		0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT	4
+#define CST4315_PALDO_EXTPNP		0x00000200	
+#define CST4315_CBUCK_MODE_MASK		0x00000c00
+#define CST4315_CBUCK_MODE_BURST	0x00000400
+#define CST4315_CBUCK_MODE_LPBURST	0x00000c00
+
+#define PMU_MAX_TRANSITION_DLY	15000
+
+
+#define PMURES_UP_TRANSITION	2
+
+
+
+
+
+#define ECI_BW_20   0x0
+#define ECI_BW_25   0x1
+#define ECI_BW_30   0x2
+#define ECI_BW_35   0x3
+#define ECI_BW_40   0x4
+#define ECI_BW_45   0x5
+#define ECI_BW_50   0x6
+#define ECI_BW_ALL  0x7
+
+
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+#endif	
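
The constants above are meant to be consumed as plain bit numbers (PMURES_BIT() with the RES43xx_* resource IDs) and as mask/shift pairs (the CST43xx_* chip-status fields). Below is a minimal standalone sketch of that usage with a few of the values copied locally; the chipstatus value is invented and nothing here touches hardware.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the PMU header above. */
#define PMURES_BIT(bit)			(1 << (bit))
#define RES4329_OTP_PU			10
#define RES4329_HT_AVAIL		21
#define CST4329_SPROM_OTP_SEL_MASK	0x00000003
#define CST4329_OTP_SEL			2

int main(void)
{
	/* Build a PMU resource mask naming the resources we care about. */
	uint32_t res_mask = PMURES_BIT(RES4329_OTP_PU) | PMURES_BIT(RES4329_HT_AVAIL);

	/* Decode the SPROM/OTP selection field from a (made-up) chipstatus value. */
	uint32_t chipstatus = 0x00000002;	/* example only, not read from a chip */
	uint32_t cis_src = chipstatus & CST4329_SPROM_OTP_SEL_MASK;

	printf("resource mask 0x%08x, CIS source: %s\n",
	       res_mask, (cis_src == CST4329_OTP_SEL) ? "OTP" : "other");
	return 0;
}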
diff --git a/drivers/net/wireless/bcm4329/include/sbconfig.h b/drivers/net/wireless/bcm4329/include/sbconfig.h
new file mode 100644
index 0000000..da18ccb
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbconfig.h
@@ -0,0 +1,276 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h,v 13.67.30.1 2008/05/07 20:17:27 Exp $
+ */
+
+
+#ifndef	_SBCONFIG_H
+#define	_SBCONFIG_H
+
+
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+
+#define SB_BUS_SIZE		0x10000		
+#define SB_BUS_BASE(b)		(SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define	SB_BUS_MAXCORES		(SB_BUS_SIZE / SI_CORE_SIZE)	
+
+
+#define	SBCONFIGOFF		0xf00		
+#define	SBCONFIGSIZE		256		
+
+#define SBIPSFLAG		0x08
+#define SBTPSFLAG		0x18
+#define	SBTMERRLOGA		0x48		
+#define	SBTMERRLOG		0x50		
+#define SBADMATCH3		0x60
+#define SBADMATCH2		0x68
+#define SBADMATCH1		0x70
+#define SBIMSTATE		0x90
+#define SBINTVEC		0x94
+#define SBTMSTATELOW		0x98
+#define SBTMSTATEHIGH		0x9c
+#define SBBWA0			0xa0
+#define SBIMCONFIGLOW		0xa8
+#define SBIMCONFIGHIGH		0xac
+#define SBADMATCH0		0xb0
+#define SBTMCONFIGLOW		0xb8
+#define SBTMCONFIGHIGH		0xbc
+#define SBBCONFIG		0xc0
+#define SBBSTATE		0xc8
+#define SBACTCNFG		0xd8
+#define	SBFLAGST		0xe8
+#define SBIDLOW			0xf8
+#define SBIDHIGH		0xfc
+
+
+
+#define SBIMERRLOGA		0xea8
+#define SBIMERRLOG		0xeb0
+#define SBTMPORTCONNID0		0xed8
+#define SBTMPORTLOCK0		0xef8
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _sbconfig {
+	uint32	PAD[2];
+	uint32	sbipsflag;		
+	uint32	PAD[3];
+	uint32	sbtpsflag;		
+	uint32	PAD[11];
+	uint32	sbtmerrloga;		
+	uint32	PAD;
+	uint32	sbtmerrlog;		
+	uint32	PAD[3];
+	uint32	sbadmatch3;		
+	uint32	PAD;
+	uint32	sbadmatch2;		
+	uint32	PAD;
+	uint32	sbadmatch1;		
+	uint32	PAD[7];
+	uint32	sbimstate;		
+	uint32	sbintvec;		
+	uint32	sbtmstatelow;		
+	uint32	sbtmstatehigh;		
+	uint32	sbbwa0;			
+	uint32	PAD;
+	uint32	sbimconfiglow;		
+	uint32	sbimconfighigh;		
+	uint32	sbadmatch0;		
+	uint32	PAD;
+	uint32	sbtmconfiglow;		
+	uint32	sbtmconfighigh;		
+	uint32	sbbconfig;		
+	uint32	PAD;
+	uint32	sbbstate;		
+	uint32	PAD[3];
+	uint32	sbactcnfg;		
+	uint32	PAD[3];
+	uint32	sbflagst;		
+	uint32	PAD[3];
+	uint32	sbidlow;		
+	uint32	sbidhigh;		
+} sbconfig_t;
+
+#endif 
+
+
+#define	SBIPS_INT1_MASK		0x3f		
+#define	SBIPS_INT1_SHIFT	0
+#define	SBIPS_INT2_MASK		0x3f00		
+#define	SBIPS_INT2_SHIFT	8
+#define	SBIPS_INT3_MASK		0x3f0000	
+#define	SBIPS_INT3_SHIFT	16
+#define	SBIPS_INT4_MASK		0x3f000000	
+#define	SBIPS_INT4_SHIFT	24
+
+
+#define	SBTPS_NUM0_MASK		0x3f		
+#define	SBTPS_F0EN0		0x40		
+
+
+#define	SBTMEL_CM		0x00000007	
+#define	SBTMEL_CI		0x0000ff00	
+#define	SBTMEL_EC		0x0f000000	
+#define	SBTMEL_ME		0x80000000	
+
+
+#define	SBIM_PC			0xf		
+#define	SBIM_AP_MASK		0x30		
+#define	SBIM_AP_BOTH		0x00		
+#define	SBIM_AP_TS		0x10		
+#define	SBIM_AP_TK		0x20		
+#define	SBIM_AP_RSV		0x30		
+#define	SBIM_IBE		0x20000		
+#define	SBIM_TO			0x40000		
+#define	SBIM_BY			0x01800000	
+#define	SBIM_RJ			0x02000000	
+
+
+#define	SBTML_RESET		0x0001		
+#define	SBTML_REJ_MASK		0x0006		
+#define	SBTML_REJ		0x0002		
+#define	SBTML_TMPREJ		0x0004		
+
+#define	SBTML_SICF_SHIFT	16		
+
+
+#define	SBTMH_SERR		0x0001		
+#define	SBTMH_INT		0x0002		
+#define	SBTMH_BUSY		0x0004		
+#define	SBTMH_TO		0x0020		
+
+#define	SBTMH_SISF_SHIFT	16		
+
+
+#define	SBBWA_TAB0_MASK		0xffff		
+#define	SBBWA_TAB1_MASK		0xffff		
+#define	SBBWA_TAB1_SHIFT	16
+
+
+#define	SBIMCL_STO_MASK		0x7		
+#define	SBIMCL_RTO_MASK		0x70		
+#define	SBIMCL_RTO_SHIFT	4
+#define	SBIMCL_CID_MASK		0xff0000	
+#define	SBIMCL_CID_SHIFT	16
+
+
+#define	SBIMCH_IEM_MASK		0xc		
+#define	SBIMCH_TEM_MASK		0x30		
+#define	SBIMCH_TEM_SHIFT	4
+#define	SBIMCH_BEM_MASK		0xc0		
+#define	SBIMCH_BEM_SHIFT	6
+
+
+#define	SBAM_TYPE_MASK		0x3		
+#define	SBAM_AD64		0x4		
+#define	SBAM_ADINT0_MASK	0xf8		
+#define	SBAM_ADINT0_SHIFT	3
+#define	SBAM_ADINT1_MASK	0x1f8		
+#define	SBAM_ADINT1_SHIFT	3
+#define	SBAM_ADINT2_MASK	0x1f8		
+#define	SBAM_ADINT2_SHIFT	3
+#define	SBAM_ADEN		0x400		
+#define	SBAM_ADNEG		0x800		
+#define	SBAM_BASE0_MASK		0xffffff00	
+#define	SBAM_BASE0_SHIFT	8
+#define	SBAM_BASE1_MASK		0xfffff000	
+#define	SBAM_BASE1_SHIFT	12
+#define	SBAM_BASE2_MASK		0xffff0000	
+#define	SBAM_BASE2_SHIFT	16
+
+
+#define	SBTMCL_CD_MASK		0xff		
+#define	SBTMCL_CO_MASK		0xf800		
+#define	SBTMCL_CO_SHIFT		11
+#define	SBTMCL_IF_MASK		0xfc0000	
+#define	SBTMCL_IF_SHIFT		18
+#define	SBTMCL_IM_MASK		0x3000000	
+#define	SBTMCL_IM_SHIFT		24
+
+
+#define	SBTMCH_BM_MASK		0x3		
+#define	SBTMCH_RM_MASK		0x3		
+#define	SBTMCH_RM_SHIFT		2
+#define	SBTMCH_SM_MASK		0x30		
+#define	SBTMCH_SM_SHIFT		4
+#define	SBTMCH_EM_MASK		0x300		
+#define	SBTMCH_EM_SHIFT		8
+#define	SBTMCH_IM_MASK		0xc00		
+#define	SBTMCH_IM_SHIFT		10
+
+
+#define	SBBC_LAT_MASK		0x3		
+#define	SBBC_MAX0_MASK		0xf0000		
+#define	SBBC_MAX0_SHIFT		16
+#define	SBBC_MAX1_MASK		0xf00000	
+#define	SBBC_MAX1_SHIFT		20
+
+
+#define	SBBS_SRD		0x1		
+#define	SBBS_HRD		0x2		
+
+
+#define	SBIDL_CS_MASK		0x3		
+#define	SBIDL_AR_MASK		0x38		
+#define	SBIDL_AR_SHIFT		3
+#define	SBIDL_SYNCH		0x40		
+#define	SBIDL_INIT		0x80		
+#define	SBIDL_MINLAT_MASK	0xf00		
+#define	SBIDL_MINLAT_SHIFT	8
+#define	SBIDL_MAXLAT		0xf000		
+#define	SBIDL_MAXLAT_SHIFT	12
+#define	SBIDL_FIRST		0x10000		
+#define	SBIDL_CW_MASK		0xc0000		
+#define	SBIDL_CW_SHIFT		18
+#define	SBIDL_TP_MASK		0xf00000	
+#define	SBIDL_TP_SHIFT		20
+#define	SBIDL_IP_MASK		0xf000000	
+#define	SBIDL_IP_SHIFT		24
+#define	SBIDL_RV_MASK		0xf0000000	
+#define	SBIDL_RV_SHIFT		28
+#define	SBIDL_RV_2_2		0x00000000	
+#define	SBIDL_RV_2_3		0x10000000	
+
+
+#define	SBIDH_RC_MASK		0x000f		
+#define	SBIDH_RCE_MASK		0x7000		
+#define	SBIDH_RCE_SHIFT		8
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define	SBIDH_CC_MASK		0x8ff0		
+#define	SBIDH_CC_SHIFT		4
+#define	SBIDH_VC_MASK		0xffff0000	
+#define	SBIDH_VC_SHIFT		16
+
+#define	SB_COMMIT		0xfd8		
+
+
+#define	SB_VEND_BCM		0x4243		
+
+#endif	
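
sbidhigh packs the core revision across two fields (the RC nibble plus the RCE extension), which is why SBCOREREV() ORs them together; the core code and vendor code share the same word. A small standalone sketch of decoding it, using only values copied from the header above; the sample register value is invented.

#include <stdint.h>
#include <stdio.h>

/* Values copied from sbconfig.h above. */
#define SBIDH_RC_MASK	0x000f
#define SBIDH_RCE_MASK	0x7000
#define SBIDH_RCE_SHIFT	8
#define SBCOREREV(sbidh) \
	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
#define SBIDH_CC_MASK	0x8ff0
#define SBIDH_CC_SHIFT	4
#define SBIDH_VC_MASK	0xffff0000
#define SBIDH_VC_SHIFT	16
#define SB_VEND_BCM	0x4243

int main(void)
{
	uint32_t sbidhigh = 0x42430812;	/* made-up sample: vendor 0x4243, core 0x081, rev 2 */

	uint32_t corerev = SBCOREREV(sbidhigh);
	uint32_t coreid  = (sbidhigh & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
	uint32_t vendor  = (sbidhigh & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT;

	printf("vendor 0x%04x (%s), core id 0x%03x, rev %u\n",
	       vendor, (vendor == SB_VEND_BCM) ? "Broadcom" : "other", coreid, corerev);
	return 0;
}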
diff --git a/drivers/net/wireless/bcm4329/include/sbhnddma.h b/drivers/net/wireless/bcm4329/include/sbhnddma.h
new file mode 100644
index 0000000..7681395
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbhnddma.h
@@ -0,0 +1,294 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.

+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h,v 13.11.250.5.16.1 2009/07/21 14:04:51 Exp $
+ */
+
+
+#ifndef	_sbhnddma_h_
+#define	_sbhnddma_h_
+
+
+
+
+
+
+
+typedef volatile struct {
+	uint32	control;		
+	uint32	addr;			
+	uint32	ptr;			
+	uint32	status;			
+} dma32regs_t;
+
+typedef volatile struct {
+	dma32regs_t	xmt;		
+	dma32regs_t	rcv;		
+} dma32regp_t;
+
+typedef volatile struct {	
+	uint32	fifoaddr;		
+	uint32	fifodatalow;		
+	uint32	fifodatahigh;		
+	uint32	pad;			
+} dma32diag_t;
+
+
+typedef volatile struct {
+	uint32	ctrl;		
+	uint32	addr;		
+} dma32dd_t;
+
+
+#define	D32RINGALIGN_BITS	12
+#define	D32MAXRINGSZ	(1 << D32RINGALIGN_BITS)
+#define	D32RINGALIGN	(1 << D32RINGALIGN_BITS)
+#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
+
+
+#define	XC_XE		((uint32)1 << 0)	
+#define	XC_SE		((uint32)1 << 1)	
+#define	XC_LE		((uint32)1 << 2)	
+#define	XC_FL		((uint32)1 << 4)	
+#define	XC_PD		((uint32)1 << 11)	
+#define	XC_AE		((uint32)3 << 16)	
+#define	XC_AE_SHIFT	16
+
+
+#define	XP_LD_MASK	0xfff			
+
+
+#define	XS_CD_MASK	0x0fff			
+#define	XS_XS_MASK	0xf000			
+#define	XS_XS_SHIFT	12
+#define	XS_XS_DISABLED	0x0000			
+#define	XS_XS_ACTIVE	0x1000			
+#define	XS_XS_IDLE	0x2000			
+#define	XS_XS_STOPPED	0x3000			
+#define	XS_XS_SUSP	0x4000			
+#define	XS_XE_MASK	0xf0000			
+#define	XS_XE_SHIFT	16
+#define	XS_XE_NOERR	0x00000			
+#define	XS_XE_DPE	0x10000			
+#define	XS_XE_DFU	0x20000			
+#define	XS_XE_BEBR	0x30000			
+#define	XS_XE_BEDA	0x40000			
+#define	XS_AD_MASK	0xfff00000		
+#define	XS_AD_SHIFT	20
+
+
+#define	RC_RE		((uint32)1 << 0)	
+#define	RC_RO_MASK	0xfe			
+#define	RC_RO_SHIFT	1
+#define	RC_FM		((uint32)1 << 8)	
+#define	RC_SH		((uint32)1 << 9)	
+#define	RC_OC		((uint32)1 << 10)	
+#define	RC_PD		((uint32)1 << 11)	
+#define	RC_AE		((uint32)3 << 16)	
+#define	RC_AE_SHIFT	16
+
+
+#define	RP_LD_MASK	0xfff			
+
+
+#define	RS_CD_MASK	0x0fff			
+#define	RS_RS_MASK	0xf000			
+#define	RS_RS_SHIFT	12
+#define	RS_RS_DISABLED	0x0000			
+#define	RS_RS_ACTIVE	0x1000			
+#define	RS_RS_IDLE	0x2000			
+#define	RS_RS_STOPPED	0x3000			
+#define	RS_RE_MASK	0xf0000			
+#define	RS_RE_SHIFT	16
+#define	RS_RE_NOERR	0x00000			
+#define	RS_RE_DPE	0x10000			
+#define	RS_RE_DFO	0x20000			
+#define	RS_RE_BEBW	0x30000			
+#define	RS_RE_BEDA	0x40000			
+#define	RS_AD_MASK	0xfff00000		
+#define	RS_AD_SHIFT	20
+
+
+#define	FA_OFF_MASK	0xffff			
+#define	FA_SEL_MASK	0xf0000			
+#define	FA_SEL_SHIFT	16
+#define	FA_SEL_XDD	0x00000			
+#define	FA_SEL_XDP	0x10000			
+#define	FA_SEL_RDD	0x40000			
+#define	FA_SEL_RDP	0x50000			
+#define	FA_SEL_XFD	0x80000			
+#define	FA_SEL_XFP	0x90000			
+#define	FA_SEL_RFD	0xc0000			
+#define	FA_SEL_RFP	0xd0000			
+#define	FA_SEL_RSD	0xe0000			
+#define	FA_SEL_RSP	0xf0000			
+
+
+#define	CTRL_BC_MASK	0x1fff			
+#define	CTRL_AE		((uint32)3 << 16)	
+#define	CTRL_AE_SHIFT	16
+#define	CTRL_EOT	((uint32)1 << 28)	
+#define	CTRL_IOC	((uint32)1 << 29)	
+#define	CTRL_EOF	((uint32)1 << 30)	
+#define	CTRL_SOF	((uint32)1 << 31)	
+
+
+#define	CTRL_CORE_MASK	0x0ff00000
+
+
+
+
+typedef volatile struct {
+	uint32	control;		
+	uint32	ptr;			
+	uint32	addrlow;		
+	uint32	addrhigh;		
+	uint32	status0;		
+	uint32	status1;		
+} dma64regs_t;
+
+typedef volatile struct {
+	dma64regs_t	tx;		
+	dma64regs_t	rx;		
+} dma64regp_t;
+
+typedef volatile struct {		
+	uint32	fifoaddr;		
+	uint32	fifodatalow;		
+	uint32	fifodatahigh;		
+	uint32	pad;			
+} dma64diag_t;
+
+
+typedef volatile struct {
+	uint32	ctrl1;		
+	uint32	ctrl2;		
+	uint32	addrlow;	
+	uint32	addrhigh;	
+} dma64dd_t;
+
+
+#define D64RINGALIGN_BITS 13	
+#define	D64MAXRINGSZ	(1 << D64RINGALIGN_BITS)
+#define	D64RINGALIGN	(1 << D64RINGALIGN_BITS)
+#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
+
+
+#define	D64_XC_XE		0x00000001	
+#define	D64_XC_SE		0x00000002	
+#define	D64_XC_LE		0x00000004	
+#define	D64_XC_FL		0x00000010	
+#define	D64_XC_PD		0x00000800	
+#define	D64_XC_AE		0x00030000	
+#define	D64_XC_AE_SHIFT		16
+
+
+#define	D64_XP_LD_MASK		0x00000fff	
+
+
+#define	D64_XS0_CD_MASK		0x00001fff	
+#define	D64_XS0_XS_MASK		0xf0000000     	
+#define	D64_XS0_XS_SHIFT		28
+#define	D64_XS0_XS_DISABLED	0x00000000	
+#define	D64_XS0_XS_ACTIVE	0x10000000	
+#define	D64_XS0_XS_IDLE		0x20000000	
+#define	D64_XS0_XS_STOPPED	0x30000000	
+#define	D64_XS0_XS_SUSP		0x40000000	
+
+#define	D64_XS1_AD_MASK		0x0001ffff	
+#define	D64_XS1_XE_MASK		0xf0000000     	
+#define	D64_XS1_XE_SHIFT		28
+#define	D64_XS1_XE_NOERR	0x00000000	
+#define	D64_XS1_XE_DPE		0x10000000	
+#define	D64_XS1_XE_DFU		0x20000000	
+#define	D64_XS1_XE_DTE		0x30000000	
+#define	D64_XS1_XE_DESRE	0x40000000	
+#define	D64_XS1_XE_COREE	0x50000000	
+
+
+#define	D64_RC_RE		0x00000001	
+#define	D64_RC_RO_MASK		0x000000fe	
+#define	D64_RC_RO_SHIFT		1
+#define	D64_RC_FM		0x00000100	
+#define	D64_RC_SH		0x00000200	
+#define	D64_RC_OC		0x00000400	
+#define	D64_RC_PD		0x00000800	
+#define	D64_RC_AE		0x00030000	
+#define	D64_RC_AE_SHIFT		16
+
+
+#define	D64_RP_LD_MASK		0x00000fff	
+
+
+#define	D64_RS0_CD_MASK		0x00001fff	
+#define	D64_RS0_RS_MASK		0xf0000000     	
+#define	D64_RS0_RS_SHIFT		28
+#define	D64_RS0_RS_DISABLED	0x00000000	
+#define	D64_RS0_RS_ACTIVE	0x10000000	
+#define	D64_RS0_RS_IDLE		0x20000000	
+#define	D64_RS0_RS_STOPPED	0x30000000	
+#define	D64_RS0_RS_SUSP		0x40000000	
+
+#define	D64_RS1_AD_MASK		0x0001ffff	
+#define	D64_RS1_RE_MASK		0xf0000000     	
+#define	D64_RS1_RE_SHIFT		28
+#define	D64_RS1_RE_NOERR	0x00000000	
+#define	D64_RS1_RE_DPO		0x10000000	
+#define	D64_RS1_RE_DFU		0x20000000	
+#define	D64_RS1_RE_DTE		0x30000000	
+#define	D64_RS1_RE_DESRE	0x40000000	
+#define	D64_RS1_RE_COREE	0x50000000	
+
+
+#define	D64_FA_OFF_MASK		0xffff		
+#define	D64_FA_SEL_MASK		0xf0000		
+#define	D64_FA_SEL_SHIFT	16
+#define	D64_FA_SEL_XDD		0x00000		
+#define	D64_FA_SEL_XDP		0x10000		
+#define	D64_FA_SEL_RDD		0x40000		
+#define	D64_FA_SEL_RDP		0x50000		
+#define	D64_FA_SEL_XFD		0x80000		
+#define	D64_FA_SEL_XFP		0x90000		
+#define	D64_FA_SEL_RFD		0xc0000		
+#define	D64_FA_SEL_RFP		0xd0000		
+#define	D64_FA_SEL_RSD		0xe0000		
+#define	D64_FA_SEL_RSP		0xf0000		
+
+
+#define	D64_CTRL1_EOT		((uint32)1 << 28)	
+#define	D64_CTRL1_IOC		((uint32)1 << 29)	
+#define	D64_CTRL1_EOF		((uint32)1 << 30)	
+#define	D64_CTRL1_SOF		((uint32)1 << 31)	
+
+
+#define	D64_CTRL2_BC_MASK	0x00007fff	
+#define	D64_CTRL2_AE		0x00030000	
+#define	D64_CTRL2_AE_SHIFT	16
+#define D64_CTRL2_PARITY	0x00040000      
+
+
+#define	D64_CTRL_CORE_MASK	0x0ff00000
+
+
+#endif	
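
Each dma64dd_t entry carries its frame flags in ctrl1 and its byte count in ctrl2, per the D64_CTRL1_* and D64_CTRL2_BC_MASK definitions above. The sketch below fills one descriptor for a frame held in a single buffer, assuming the usual HND meanings of the CTRL1 flags (start/end of frame, interrupt on completion, end of descriptor table), which this stripped-comment header no longer spells out; the address and length are placeholders and no device access is shown.

#include <stdint.h>
#include <stdio.h>

/* Values copied from sbhnddma.h above. */
#define D64_CTRL1_EOT		((uint32_t)1 << 28)	/* assumed: end of descriptor table */
#define D64_CTRL1_IOC		((uint32_t)1 << 29)	/* assumed: interrupt on completion */
#define D64_CTRL1_EOF		((uint32_t)1 << 30)	/* assumed: end of frame */
#define D64_CTRL1_SOF		((uint32_t)1 << 31)	/* assumed: start of frame */
#define D64_CTRL2_BC_MASK	0x00007fff		/* buffer byte count */

/* Mirrors dma64dd_t, without the volatile MMIO semantics. */
typedef struct {
	uint32_t ctrl1;
	uint32_t ctrl2;
	uint32_t addrlow;
	uint32_t addrhigh;
} dma64dd_sketch_t;

/* Describe a whole frame that fits in one buffer; 'last' marks the final ring slot. */
static void fill_single_frame_dd(dma64dd_sketch_t *dd, uint64_t pa, uint32_t len, int last)
{
	dd->ctrl1 = D64_CTRL1_SOF | D64_CTRL1_EOF | D64_CTRL1_IOC |
		    (last ? D64_CTRL1_EOT : 0);
	dd->ctrl2 = len & D64_CTRL2_BC_MASK;
	dd->addrlow = (uint32_t)pa;
	dd->addrhigh = (uint32_t)(pa >> 32);
}

int main(void)
{
	dma64dd_sketch_t dd;

	fill_single_frame_dd(&dd, 0x1fa00000ULL /* placeholder DMA address */, 1522, 1);
	printf("ctrl1 0x%08x ctrl2 0x%08x addr 0x%08x:%08x\n",
	       dd.ctrl1, dd.ctrl2, dd.addrhigh, dd.addrlow);
	return 0;
}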
diff --git a/drivers/net/wireless/bcm4329/include/sbpcmcia.h b/drivers/net/wireless/bcm4329/include/sbpcmcia.h
new file mode 100644
index 0000000..d6d8033
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbpcmcia.h
@@ -0,0 +1,109 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h,v 13.31.4.1.2.3.8.7 2009/06/22 05:14:24 Exp $
+ */
+
+
+#ifndef	_SBPCMCIA_H
+#define	_SBPCMCIA_H
+
+
+
+
+#define	PCMCIA_FCR		(0x700 / 2)
+
+#define	FCR0_OFF		0
+#define	FCR1_OFF		(0x40 / 2)
+#define	FCR2_OFF		(0x80 / 2)
+#define	FCR3_OFF		(0xc0 / 2)
+
+#define	PCMCIA_FCR0		(0x700 / 2)
+#define	PCMCIA_FCR1		(0x740 / 2)
+#define	PCMCIA_FCR2		(0x780 / 2)
+#define	PCMCIA_FCR3		(0x7c0 / 2)
+
+
+
+#define	PCMCIA_COR		0
+
+#define	COR_RST			0x80
+#define	COR_LEV			0x40
+#define	COR_IRQEN		0x04
+#define	COR_BLREN		0x01
+#define	COR_FUNEN		0x01
+
+
+#define	PCICIA_FCSR		(2 / 2)
+#define	PCICIA_PRR		(4 / 2)
+#define	PCICIA_SCR		(6 / 2)
+#define	PCICIA_ESR		(8 / 2)
+
+
+#define PCM_MEMOFF		0x0000
+#define F0_MEMOFF		0x1000
+#define F1_MEMOFF		0x2000
+#define F2_MEMOFF		0x3000
+#define F3_MEMOFF		0x4000
+
+
+#define MEM_ADDR0		(0x728 / 2)
+#define MEM_ADDR1		(0x72a / 2)
+#define MEM_ADDR2		(0x72c / 2)
+
+
+#define PCMCIA_ADDR0		(0x072e / 2)
+#define PCMCIA_ADDR1		(0x0730 / 2)
+#define PCMCIA_ADDR2		(0x0732 / 2)
+
+#define MEM_SEG			(0x0734 / 2)
+#define SROM_CS			(0x0736 / 2)
+#define SROM_DATAL		(0x0738 / 2)
+#define SROM_DATAH		(0x073a / 2)
+#define SROM_ADDRL		(0x073c / 2)
+#define SROM_ADDRH		(0x073e / 2)
+#define	SROM_INFO2		(0x0772 / 2)	
+#define	SROM_INFO		(0x07be / 2)	
+
+
+#define SROM_IDLE		0
+#define SROM_WRITE		1
+#define SROM_READ		2
+#define SROM_WEN		4
+#define SROM_WDS		7
+#define SROM_DONE		8
+
+
+#define	SRI_SZ_MASK		0x03
+#define	SRI_BLANK		0x04
+#define	SRI_OTP			0x80
+
+
+
+#define SBTML_INT_ACK		0x40000		
+#define SBTML_INT_EN		0x20000		
+
+
+#define SBTMH_INT_STATUS	0x40000		
+
+#endif	
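
The register offsets in this header are byte offsets divided by 2 (note the recurring "(0x700 / 2)" pattern), i.e. indices into a 16-bit-word view of the register space. A purely arithmetic sketch of where a given function's COR lands under that convention:

#include <stdio.h>

/* Values copied from sbpcmcia.h above (halfword indices). */
#define PCMCIA_FCR	(0x700 / 2)
#define FCR1_OFF	(0x40 / 2)
#define PCMCIA_COR	0

int main(void)
{
	/* COR of function 1, as a halfword index and the corresponding byte offset.
	 * The byte offset (0x740) matches the PCMCIA_FCR1 definition above. */
	unsigned cor_idx = PCMCIA_FCR + FCR1_OFF + PCMCIA_COR;

	printf("f1 COR: halfword index 0x%x, byte offset 0x%x\n", cor_idx, cor_idx * 2);
	return 0;
}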
diff --git a/drivers/net/wireless/bcm4329/include/sbsdio.h b/drivers/net/wireless/bcm4329/include/sbsdio.h
new file mode 100644
index 0000000..75aaf4d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbsdio.h
@@ -0,0 +1,166 @@
+/*
+ * SDIO device core hardware definitions.
+ * SDIO is a portion of the PCMCIA core in core revs 3 - 8.
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h,v 13.29.4.1.22.3 2009/03/11 20:26:57 Exp $
+ */
+
+#ifndef	_SBSDIO_H
+#define	_SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION		3	/* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS			0x10000		/* sprom command and status */
+#define SBSDIO_SPROM_INFO		0x10001		/* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW		0x10002		/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH		0x10003 	/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW		0x10004		/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH		0x10005		/* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA		0x10006		/* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN		0x10007		/* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK		0x10008		/* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL		0x10009		/* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW		0x1000A		/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID		0x1000B		/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH		0x1000C		/* SB Address Window High (b31:b24)    */
+#define SBSDIO_FUNC1_FRAMECTRL		0x1000D		/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR		0x1000E		/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 	0x1000F		/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO		0x10019		/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI		0x1001A		/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO		0x1001B		/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI		0x1001C		/* Read Frame Byte Count High */
+
+#define SBSDIO_FUNC1_MISC_REG_START	0x10000 	/* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT	0x1001C 	/* f1 misc register end */
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE		0
+#define SBSDIO_SPROM_WRITE		1
+#define SBSDIO_SPROM_READ		2
+#define SBSDIO_SPROM_WEN		4
+#define SBSDIO_SPROM_WDS		7
+#define SBSDIO_SPROM_DONE		8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK			0x03		/* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK			0x04		/* deprecated in corerev 6 */
+#define	SROM_OTP			0x80		/* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL		0x01		/* or'd with onchip xtal_pu,
+							 * 1: power on oscillator
+							 * (for 4318 only)
+							 */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK		0x7f		/* number of words - 1 for sd device
+							 * to wait before sending data to host
+							 */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY		0x01		/* 1: device will assert busy signal when
+							 * receiving CMD53
+							 */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02		/* 1: assertion of sdio interrupt is
+							 * synchronous to the sdio clock
+							 */
+#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04		/* 1: mask all interrupts to host
+							 * except the chipActive (rev 8)
+							 */
+#define SBSDIO_DEVCTL_PADS_ISO		0x08		/* 1: isolate internal sdio signals, put
+							 * external pads in tri-state; requires
+							 * sdio bus power cycle to clear (rev 9)
+							 */
+#define SBSDIO_DEVCTL_SB_RST_CTL	0x30		/* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_RST_CORECTL	0x00		/*   Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_BPRESET	0x10		/*   Force backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20		/*   Force no backplane reset */
+
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP		0x01		/* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT			0x02		/* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP		0x04		/* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ		0x08		/* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ		0x10		/* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20		/* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL		0x40		/* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL			0x80		/* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL		0x40
+#define SBSDIO_Rev8_ALP_AVAIL		0x80
+
+#define SBSDIO_AVBITS			(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)		((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)		(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)		(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly)	(SBSDIO_ALPAV(regval) && \
+					(alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0		0x01		/* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1		0x02		/* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2		0x04		/* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD		0x08		/* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL		0x0f		/* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF		/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT	0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000		/* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK		0x80		/* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK		0xff		/* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK		0xffU		/* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK		0xffff8000	/* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON		0x1000		/* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT		0x200		/* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT       0x078           /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF		/* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN	6		/* manfid tuple length, include tuple,
+							 * link bytes
+							 */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET		0x8		/* 8 control bytes first, CIS starts from
+							 * 8th byte
+							 */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX	64		/* sdio byte mode: maximum length of one
+							 * data command
+							 */
+
+#define SBSDIO_CORE_ADDR_MASK		0x1FFFF		/* sdio core function one address mask */
+
+#endif	/* _SBSDIO_H */
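
The SBSDIO_FUNC1_SBADDR{LOW,MID,HIGH} registers form a sliding window onto the 32-bit backplane address space: the window base is the part covered by SBSDIO_SBWINDOW_MASK, and the remaining low bits become the in-function offset (optionally OR'd with SBSDIO_SB_ACCESS_2_4B_FLAG for a 32-bit access). The sketch below only splits an address according to those masks; the actual CMD52 writes of the window bytes depend on the host driver and are indicated by comments, and the example address is invented.

#include <stdint.h>
#include <stdio.h>

/* Values copied from sbsdio.h above. */
#define SBSDIO_SBADDRLOW_MASK		0x80		/* SBADDRLOW holds only address bit 15 */
#define SBSDIO_SBADDRMID_MASK		0xff		/* address bits 23:16 */
#define SBSDIO_SBADDRHIGH_MASK		0xff		/* address bits 31:24 */
#define SBSDIO_SBWINDOW_MASK		0xffff8000	/* address bits held by the SBADDR regs */
#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF
#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000

int main(void)
{
	uint32_t addr = 0x1800c620;	/* example backplane address */

	/* Bytes that would be written to SBSDIO_FUNC1_SBADDR{LOW,MID,HIGH} via CMD52. */
	uint32_t lo  = (addr >>  8) & SBSDIO_SBADDRLOW_MASK;
	uint32_t mid = (addr >> 16) & SBSDIO_SBADDRMID_MASK;
	uint32_t hi  = (addr >> 24) & SBSDIO_SBADDRHIGH_MASK;

	/* In-window offset used for the data access, flagged for a 4-byte access. */
	uint32_t off = (addr & SBSDIO_SB_OFT_ADDR_MASK) | SBSDIO_SB_ACCESS_2_4B_FLAG;

	printf("window base 0x%08x -> low 0x%02x mid 0x%02x high 0x%02x, offset 0x%05x\n",
	       addr & SBSDIO_SBWINDOW_MASK, lo, mid, hi, off);
	return 0;
}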
diff --git a/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h b/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h
new file mode 100644
index 0000000..7c7c7e4
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbsdpcmdev.h
@@ -0,0 +1,288 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific device core support
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h,v 13.29.4.1.4.6.6.2 2008/12/31 21:16:51 Exp $
+ */
+
+#ifndef	_sbsdpcmdev_h_
+#define	_sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	dma64regs_t	xmt;		/* dma tx */
+	uint32 PAD[2];
+	dma64regs_t	rcv;		/* dma rx */
+	uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+	dma64p_t dma64regs[2];
+	dma64diag_t dmafifo;		/* DMA Diagnostic Regs, 0x280-0x28c */
+	uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+	dma32regp_t dma32regs[2];	/* dma tx & rx, 0x200-0x23c */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x240-0x24c */
+	uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+	dma32regp_t dmaregs;		/* DMA Regs, 0x200-0x21c, rev8 */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x220-0x22c */
+	uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+	uint32 corecontrol;		/* CoreControl, 0x000, rev8 */
+	uint32 corestatus;		/* CoreStatus, 0x004, rev8  */
+	uint32 PAD[1];
+	uint32 biststatus;		/* BistStatus, 0x00c, rev8  */
+
+	/* PCMCIA access */
+	uint16 pcmciamesportaladdr;	/* PcmciaMesPortalAddr, 0x010, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciamesportalmask;	/* PcmciaMesPortalMask, 0x014, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciawrframebc;		/* PcmciaWrFrameBC, 0x018, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciaunderflowtimer;	/* PcmciaUnderflowTimer, 0x01c, rev8   */
+	uint16 PAD[1];
+
+	/* interrupt */
+	uint32 intstatus;		/* IntStatus, 0x020, rev8   */
+	uint32 hostintmask;		/* IntHostMask, 0x024, rev8   */
+	uint32 intmask;			/* IntSbMask, 0x028, rev8   */
+	uint32 sbintstatus;		/* SBIntStatus, 0x02c, rev8   */
+	uint32 sbintmask;		/* SBIntMask, 0x030, rev8   */
+	uint32 PAD[3];
+	uint32 tosbmailbox;		/* ToSBMailbox, 0x040, rev8   */
+	uint32 tohostmailbox;		/* ToHostMailbox, 0x044, rev8   */
+	uint32 tosbmailboxdata;		/* ToSbMailboxData, 0x048, rev8   */
+	uint32 tohostmailboxdata;	/* ToHostMailboxData, 0x04c, rev8   */
+
+	/* synchronized access to registers in SDIO clock domain */
+	uint32 sdioaccess;		/* SdioAccess, 0x050, rev8   */
+	uint32 PAD[3];
+
+	/* PCMCIA frame control */
+	uint8  pcmciaframectrl;		/* pcmciaFrameCtrl, 0x060, rev8   */
+	uint8  PAD[3];
+	uint8  pcmciawatermark;		/* pcmciaWaterMark, 0x064, rev8   */
+	uint8  PAD[155];
+
+	/* interrupt batching control */
+	uint32 intrcvlazy;		/* IntRcvLazy, 0x100, rev8 */
+	uint32 PAD[3];
+
+	/* counters */
+	uint32 cmd52rd;			/* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+	uint32 cmd52wr;			/* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+	uint32 cmd53rd;			/* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+	uint32 cmd53wr;			/* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+	uint32 abort;			/* AbortCount, 0x120, rev8, SDIO: aborts */
+	uint32 datacrcerror;		/* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+	uint32 rdoutofsync;		/* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+	uint32 writebusy;		/* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+	uint32 readwait;		/* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+	uint32 readterm;		/* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+	uint32 writeterm;		/* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+	uint32 PAD[40];
+	uint32 clockctlstatus;		/* ClockCtlStatus, 0x1e0, rev8 */
+	uint32 PAD[7];
+
+	/* DMA engines */
+	volatile union {
+		pcmdma32_t pcm32;
+		sdiodma32_t sdiod32;
+		sdiodma64_t sdiod64;
+	} dma;
+
+	/* SDIO/PCMCIA CIS region */
+	char cis[512];			/* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+	/* PCMCIA function control registers */
+	char pcmciafcr[256];		/* PCMCIA FCR, 0x600-6ff, rev6 */
+	uint16 PAD[55];
+
+	/* PCMCIA backplane access */
+	uint16 backplanecsr;		/* BackplaneCSR, 0x76E, rev6 */
+	uint16 backplaneaddr0;		/* BackplaneAddr0, 0x770, rev6 */
+	uint16 backplaneaddr1;		/* BackplaneAddr1, 0x772, rev6 */
+	uint16 backplaneaddr2;		/* BackplaneAddr2, 0x774, rev6 */
+	uint16 backplaneaddr3;		/* BackplaneAddr3, 0x776, rev6 */
+	uint16 backplanedata0;		/* BackplaneData0, 0x778, rev6 */
+	uint16 backplanedata1;		/* BackplaneData1, 0x77a, rev6 */
+	uint16 backplanedata2;		/* BackplaneData2, 0x77c, rev6 */
+	uint16 backplanedata3;		/* BackplaneData3, 0x77e, rev6 */
+	uint16 PAD[31];
+
+	/* sprom "size" & "blank" info */
+	uint16 spromstatus;		/* SPROMStatus, 0x7BE, rev2 */
+	uint32 PAD[464];
+
+	/* Sonics SiliconBackplane registers */
+	sbconfig_t sbconfig;		/* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY	(1 << 0)	/* CIS Ready */
+#define CC_BPRESEN	(1 << 1)	/* CCCR RES signal causes backplane reset */
+#define CC_F2RDY	(1 << 2)	/* set CCCR IOR2 bit */
+#define CC_CLRPADSISO	(1 << 3)	/* clear SDIO pads isolation bit (rev 11) */
+
+/* corestatus */
+#define CS_PCMCIAMODE	(1 << 0)	/* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV	(1 << 1)	/* 1=smartDev enabled */
+#define CS_F2ENABLED	(1 << 2)	/* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK	0x7fff	/* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK	0x7fff	/* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK	0xffff	/* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK		0x07ff	/* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
+#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
+#define	I_PC		(1 << 10)	/* descriptor error */
+#define	I_PD		(1 << 11)	/* data error */
+#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
+#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
+#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
+#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
+#define	I_RI		(1 << 16)	/* Receive Interrupt */
+#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
+#define	I_XI		(1 << 24)	/* Transmit Interrupt */
+#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
+#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
+#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
+#define I_CHIPACTIVE	(1 << 29)	/* chip transitioned from doze to active state */
+#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
+#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
+#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)	/* DMA Errors */
+#define I_DMA		(I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR	(1 << 8)	/* Backplane SError (write) */
+#define I_SB_RESPERR	(1 << 9)	/* Backplane Response Error (read) */
+#define I_SB_SPROMERR	(1 << 10)	/* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK	0x000000ff	/* Read/Write Data Mask */
+#define SDA_ADDR_MASK	0x000fff00	/* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT	8		/* Read/Write Address Shift */
+#define SDA_WRITE	0x01000000	/* Write bit  */
+#define SDA_READ	0x00000000	/* Write bit cleared for Read */
+#define SDA_BUSY	0x80000000	/* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE		0x000	/* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE	0x100	/* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE	0x200	/* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE	0x300	/* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA	0x006	/* ChipControlData */
+#define SDA_CHIPCONTROLENAB	0x007	/* ChipControlEnable */
+#define SDA_F2WATERMARK		0x008	/* Function 2 Watermark */
+#define SDA_DEVICECONTROL	0x009	/* DeviceControl */
+#define SDA_SBADDRLOW		0x00a	/* SbAddrLow */
+#define SDA_SBADDRMID		0x00b	/* SbAddrMid */
+#define SDA_SBADDRHIGH		0x00c	/* SbAddrHigh */
+#define SDA_FRAMECTRL		0x00d	/* FrameCtrl */
+#define SDA_CHIPCLOCKCSR	0x00e	/* ChipClockCSR */
+#define SDA_SDIOPULLUP		0x00f	/* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW	0x019	/* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH	0x01a	/* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW	0x01b	/* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH	0x01c	/* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK	0x7f	/* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK	0x80	/* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK	0xff	/* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK	0xff	/* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+#define SFC_CRC4WOOS	(1 << 2)	/* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL	(1 << 3)	/* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define PFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+
+/* intrcvlazy */
+#define	IRL_TO_MASK	0x00ffffff	/* timeout */
+#define	IRL_FC_MASK	0xff000000	/* frame count */
+#define	IRL_FC_SHIFT	24		/* frame count */
+
+/* rx header */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC		0x0001		/* CRC error detected */
+#define RXF_WOOS	0x0002		/* write frame out of sync */
+#define RXF_WF_TERM	0x0004		/* write frame terminated */
+#define RXF_ABORT	0x0008		/* write frame aborted */
+#define RXF_DISCARD	(RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT)	/* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN	4	/* HW frametag: 2 bytes len, 2 bytes check val */
+
+#endif	/* _sbsdpcmdev_h_ */
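
The sdioaccess register gives an indirect path to registers in the SDIO clock domain: the data byte sits in SDA_DATA_MASK, the register address in SDA_ADDR_MASK, SDA_WRITE selects a write, and SDA_BUSY is described only as a busy bit, so the exact handshake is an assumption here. A brief sketch of composing such a request; the register I/O itself depends on the host driver and is left as comments.

#include <stdint.h>
#include <stdio.h>

/* Values copied from sbsdpcmdev.h above. */
#define SDA_DATA_MASK		0x000000ff
#define SDA_ADDR_MASK		0x000fff00
#define SDA_ADDR_SHIFT		8
#define SDA_WRITE		0x01000000
#define SDA_BUSY		0x80000000
#define SDA_F1_REG_SPACE	0x300
#define SDA_CHIPCLOCKCSR	0x00e

int main(void)
{
	/* Compose a write of 0x10 (SBSDIO_HT_AVAIL_REQ in sbsdio.h above) to the
	 * F1 ChipClockCSR register via the sdioaccess address spaces. */
	uint32_t addr = SDA_F1_REG_SPACE + SDA_CHIPCLOCKCSR;
	uint32_t req = SDA_WRITE |
		       ((addr << SDA_ADDR_SHIFT) & SDA_ADDR_MASK) |
		       (0x10 & SDA_DATA_MASK);

	/* 'req' would be written to the core's sdioaccess register; the driver would
	 * then poll until SDA_BUSY clears (handshake assumed from the bit name).
	 * Decoding a completed read would look like: data = sdioaccess & SDA_DATA_MASK. */
	printf("sdioaccess request word: 0x%08x\n", req);
	return 0;
}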
diff --git a/drivers/net/wireless/bcm4329/include/sbsocram.h b/drivers/net/wireless/bcm4329/include/sbsocram.h
new file mode 100644
index 0000000..5ede0b6
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sbsocram.h
@@ -0,0 +1,150 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h,v 13.9.162.2 2008/12/12 14:13:27 Exp $
+ */
+
+
+#ifndef	_SBSOCRAM_H
+#define	_SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	
+
+
+typedef volatile struct sbsocramregs {
+	uint32	coreinfo;
+	uint32	bwalloc;
+	uint32	extracoreinfo;
+	uint32	biststat;
+	uint32	bankidx;
+	uint32	standbyctrl;
+
+	uint32	errlogstatus;	
+	uint32	errlogaddr;	
+	
+	uint32	cambankidx;
+	uint32	cambankstandbyctrl;
+	uint32	cambankpatchctrl;
+	uint32	cambankpatchtblbaseaddr;
+	uint32	cambankcmdreg;
+	uint32	cambankdatareg;
+	uint32	cambankmaskreg;
+	uint32	PAD[17];
+	uint32	extmemconfig;
+	uint32	extmemparitycsr;
+	uint32	extmemparityerrdata;
+	uint32	extmemparityerrcnt;
+	uint32	extmemwrctrlandsize;
+	uint32	PAD[84];
+	uint32	workaround;
+	uint32	pwrctl;		
+} sbsocramregs_t;
+
+#endif	
+
+
+#define	SR_COREINFO		0x00
+#define	SR_BWALLOC		0x04
+#define	SR_BISTSTAT		0x0c
+#define	SR_BANKINDEX		0x10
+#define	SR_BANKSTBYCTL		0x14
+#define SR_PWRCTL		0x1e8
+
+
+#define	SRCI_PT_MASK		0x00070000	
+#define	SRCI_PT_SHIFT		16
+
+#define SRCI_PT_OCP_OCP		0
+#define SRCI_PT_AXI_OCP		1
+#define SRCI_PT_ARM7AHB_OCP	2
+#define SRCI_PT_CM3AHB_OCP	3
+#define SRCI_PT_AXI_AXI		4
+#define SRCI_PT_AHB_AXI		5
+
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define SRCI_LRS_MASK		0x0f000000
+#define SRCI_LRS_SHIFT		24
+
+
+#define	SRCI_MS0_MASK		0xf
+#define SR_MS0_BASE		16
+
+
+#define	SRCI_ROMNB_MASK		0xf000
+#define	SRCI_ROMNB_SHIFT	12
+#define	SRCI_ROMBSZ_MASK	0xf00
+#define	SRCI_ROMBSZ_SHIFT	8
+#define	SRCI_SRNB_MASK		0xf0
+#define	SRCI_SRNB_SHIFT		4
+#define	SRCI_SRBSZ_MASK		0xf
+#define	SRCI_SRBSZ_SHIFT	0
+
+#define SR_BSZ_BASE		14
+
+
+#define	SRSC_SBYOVR_MASK	0x80000000
+#define	SRSC_SBYOVR_SHIFT	31
+#define	SRSC_SBYOVRVAL_MASK	0x60000000
+#define	SRSC_SBYOVRVAL_SHIFT	29
+#define	SRSC_SBYEN_MASK		0x01000000	
+#define	SRSC_SBYEN_SHIFT	24
+
+
+#define SRPC_PMU_STBYDIS_MASK	0x00000010	
+#define SRPC_PMU_STBYDIS_SHIFT	4
+#define SRPC_STBYOVRVAL_MASK	0x00000008
+#define SRPC_STBYOVRVAL_SHIFT	3
+#define SRPC_STBYOVR_MASK	0x00000007
+#define SRPC_STBYOVR_SHIFT	0
+
+
+#define SRECC_NUM_BANKS_MASK   0x000000F0
+#define SRECC_NUM_BANKS_SHIFT  4
+#define SRECC_BANKSIZE_MASK    0x0000000F
+#define SRECC_BANKSIZE_SHIFT   0
+
+#define SRECC_BANKSIZE(value)	 (1 << (value))
+
+
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS   0x0001FFFC
+#define SRP_VALID     0x8000
+
+
+#define SRCMD_WRITE  0x00020000
+#define SRCMD_READ   0x00010000
+#define SRCMD_DONE   0x80000000
+
+#define SRCMD_DONE_DLY	1000
+
+
+#endif	
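
coreinfo packs the ROM and SRAM bank counts and bank-size codes (the SRCI_ROMNB/ROMBSZ and SRCI_SRNB/SRBSZ fields). The sketch below only extracts those fields from a raw value; the total-size formula using SR_BSZ_BASE mirrors how older socram sizing code is commonly written, but treat it as an assumption rather than something this header guarantees.

#include <stdint.h>
#include <stdio.h>

/* Values copied from sbsocram.h above. */
#define SRCI_SRNB_MASK		0xf0
#define SRCI_SRNB_SHIFT		4
#define SRCI_SRBSZ_MASK		0xf
#define SRCI_SRBSZ_SHIFT	0
#define SR_BSZ_BASE		14

int main(void)
{
	uint32_t coreinfo = 0x00000044;	/* example value: 4 banks, bank-size code 4 */

	uint32_t nbanks  = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
	uint32_t bszcode = (coreinfo & SRCI_SRBSZ_MASK) >> SRCI_SRBSZ_SHIFT;

	/* Assumed sizing rule: each bank is (1 << (code + SR_BSZ_BASE)) bytes. */
	uint32_t banksize = 1u << (bszcode + SR_BSZ_BASE);

	printf("%u banks x %u bytes = %u bytes of SOCRAM\n",
	       nbanks, banksize, nbanks * banksize);
	return 0;
}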
diff --git a/drivers/net/wireless/bcm4329/include/sdio.h b/drivers/net/wireless/bcm4329/include/sdio.h
new file mode 100644
index 0000000..280cb84
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sdio.h
@@ -0,0 +1,566 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h,v 13.24.4.1.4.1.16.1 2009/08/12 01:08:02 Exp $
+ */
+
+#ifndef	_SDIO_H
+#define	_SDIO_H
+
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+	uint8	cccr_sdio_rev;		/* RO, cccr and sdio revision */
+	uint8	sd_rev;			/* RO, sd spec revision */
+	uint8	io_en;			/* I/O enable */
+	uint8	io_rdy;			/* I/O ready reg */
+	uint8	intr_ctl;		/* Master and per function interrupt enable control */
+	uint8	intr_status;		/* RO, interrupt pending status */
+	uint8	io_abort;		/* read/write abort or reset all functions */
+	uint8	bus_inter;		/* bus interface control */
+	uint8	capability;		/* RO, card capability */
+
+	uint8	cis_base_low;		/* 0x9 RO, common CIS base address, LSB */
+	uint8	cis_base_mid;
+	uint8	cis_base_high;		/* 0xB RO, common CIS base address, MSB */
+
+	/* suspend/resume registers */
+	uint8	bus_suspend;		/* 0xC */
+	uint8	func_select;		/* 0xD */
+	uint8	exec_flag;		/* 0xE */
+	uint8	ready_flag;		/* 0xF */
+
+	uint8	fn0_blk_size[2];	/* 0x10(LSB), 0x11(MSB) */
+
+	uint8	power_control;		/* 0x12 (SDIO version 1.10) */
+
+	uint8	speed_control;		/* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV			0x00
+#define SDIOD_CCCR_SDREV		0x01
+#define SDIOD_CCCR_IOEN			0x02
+#define SDIOD_CCCR_IORDY		0x03
+#define SDIOD_CCCR_INTEN		0x04
+#define SDIOD_CCCR_INTPEND		0x05
+#define SDIOD_CCCR_IOABORT		0x06
+#define SDIOD_CCCR_BICTRL		0x07
+#define SDIOD_CCCR_CAPABLITIES		0x08
+#define SDIOD_CCCR_CISPTR_0		0x09
+#define SDIOD_CCCR_CISPTR_1		0x0A
+#define SDIOD_CCCR_CISPTR_2		0x0B
+#define SDIOD_CCCR_BUSSUSP		0x0C
+#define SDIOD_CCCR_FUNCSEL		0x0D
+#define SDIOD_CCCR_EXECFLAGS		0x0E
+#define SDIOD_CCCR_RDYFLAGS		0x0F
+#define SDIOD_CCCR_BLKSIZE_0		0x10
+#define SDIOD_CCCR_BLKSIZE_1		0x11
+#define SDIOD_CCCR_POWER_CONTROL	0x12
+#define SDIOD_CCCR_SPEED_CONTROL	0x13
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_SEPINT		0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK	0xf0	/* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK	0x0f	/* CCCR format version number */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK		0x0f	/* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1	0x02	/* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2	0x04	/* function 2 I/O enable */
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1	0x02	/* function 1 I/O ready */
+#define SDIO_FUNC_READY_2	0x04	/* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN	0x1	/* interrupt enable master */
+#define INTR_CTL_FUNC1_EN	0x2	/* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN	0x4	/* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1	0x2	/* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2	0x4	/* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL	0x08	/* I/O card reset */
+#define IO_ABORT_FUNC_MASK	0x07	/* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS	0x80	/* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP	0x40	/* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN	0x20	/* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK	0x03	/* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT	0x02	/* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT	0x00	/* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS		0x80	/* 4-bit support for low speed card */
+#define SDIO_CAP_LSC		0x40	/* low speed card */
+#define SDIO_CAP_E4MI		0x20	/* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI		0x10	/* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS		0x08	/* support suspend/resume */
+#define SDIO_CAP_SRW		0x04	/* support read wait */
+#define SDIO_CAP_SMB		0x02	/* support multi-block transfer */
+#define SDIO_CAP_SDC		0x01	/* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC		0x01	/* supports master power control (RO) */
+#define SDIO_POWER_EMPC		0x02	/* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS		0x01	/* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS		0x02	/* enable high-speed [clocking] mode (RW) */
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK	0x01	/* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE		0x02	/* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI	0x04	/* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+	uint8	devctr;			/* device interface, CSA control */
+	uint8	ext_dev;		/* extended standard I/O device type code */
+	uint8	pwr_sel;		/* power selection support */
+	uint8	PAD[6];			/* reserved */
+
+	uint8	cis_low;		/* CIS LSB */
+	uint8	cis_mid;
+	uint8	cis_high;		/* CIS MSB */
+	uint8	csa_low;		/* code storage area, LSB */
+	uint8	csa_mid;
+	uint8	csa_high;		/* code storage area, MSB */
+	uint8	csa_dat_win;		/* data access window to function */
+
+	uint8	fnx_blk_size[2];	/* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_IOFUNCS		7
+
+/* SDIO Device FBR Start Address  */
+#define SDIOD_FBR_STARTADDR		0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE			0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n)		((n) * 0x100)
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR		0x00	/* basic info for function */
+#define SDIOD_FBR_EXT_DEV		0x01	/* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL		0x02	/* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0		0x09
+#define SDIOD_FBR_CISPTR_1		0x0A
+#define SDIOD_FBR_CISPTR_2		0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0		0x0C
+#define SDIOD_FBR_CSA_ADDR_1		0x0D
+#define SDIOD_FBR_CSA_ADDR_2		0x0E
+#define SDIOD_FBR_CSA_DATA		0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0		0x10
+#define SDIOD_FBR_BLKSIZE_1		0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC	0x0f	/* device interface code */
+#define SDIOD_FBR_DECVTR_CSA	0x40	/* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN	0x80	/* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE		0	/* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART		1
+#define SDIOD_DIC_BLUETOOTH_A	2
+#define SDIOD_DIC_BLUETOOTH_B	3
+#define SDIOD_DIC_GPS		4
+#define SDIOD_DIC_CAMERA	5
+#define SDIOD_DIC_PHS		6
+#define SDIOD_DIC_WLAN		7
+#define SDIOD_DIC_EXT		0xf	/* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS	0x01	/* supports power selection */
+#define SDIOD_PWR_SEL_EPS	0x02	/* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0		0
+#define SDIO_FUNC_1		1
+#define SDIO_FUNC_2		2
+#define SDIO_FUNC_3		3
+#define SDIO_FUNC_4		4
+#define SDIO_FUNC_5		5
+#define SDIO_FUNC_6		6
+#define SDIO_FUNC_7		7
+
+#define SD_CARD_TYPE_UNKNOWN	0	/* bad type or unrecognized */
+#define SD_CARD_TYPE_IO		1	/* IO only card */
+#define SD_CARD_TYPE_MEMORY	2	/* memory only card */
+#define SD_CARD_TYPE_COMBO	3	/* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE	2048	/* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE	1	/* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE		31
+#define CARDREG_STATUS_BIT_COMCRCERROR		23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND	22
+#define CARDREG_STATUS_BIT_ERROR		19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3	12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2	11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1	10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0	9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR	4
+
+
+
+#define SD_CMD_GO_IDLE_STATE		0	/* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND		1
+#define SD_CMD_MMC_SET_RCA		3
+#define SD_CMD_IO_SEND_OP_COND		5	/* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD	7
+#define SD_CMD_SEND_CSD			9
+#define SD_CMD_SEND_CID			10
+#define SD_CMD_STOP_TRANSMISSION	12
+#define SD_CMD_SEND_STATUS		13
+#define SD_CMD_GO_INACTIVE_STATE	15
+#define SD_CMD_SET_BLOCKLEN		16
+#define SD_CMD_READ_SINGLE_BLOCK	17
+#define SD_CMD_READ_MULTIPLE_BLOCK	18
+#define SD_CMD_WRITE_BLOCK		24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK	25
+#define SD_CMD_PROGRAM_CSD		27
+#define SD_CMD_SET_WRITE_PROT		28
+#define SD_CMD_CLR_WRITE_PROT		29
+#define SD_CMD_SEND_WRITE_PROT		30
+#define SD_CMD_ERASE_WR_BLK_START	32
+#define SD_CMD_ERASE_WR_BLK_END		33
+#define SD_CMD_ERASE			38
+#define SD_CMD_LOCK_UNLOCK		42
+#define SD_CMD_IO_RW_DIRECT		52	/* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED		53	/* mandatory for SDIO */
+#define SD_CMD_APP_CMD			55
+#define SD_CMD_GEN_CMD			56
+#define SD_CMD_READ_OCR			58
+#define SD_CMD_CRC_ON_OFF		59	/* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS		13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS	22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT	23
+#define SD_ACMD_SD_SEND_OP_COND		41
+#define SD_ACMD_SET_CLR_CARD_DETECT	42
+#define SD_ACMD_SEND_SCR		51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ		0   /* Read_Write: Read */
+#define SD_IO_OP_WRITE		1   /* Read_Write: Write */
+#define SD_IO_RW_NORMAL		0   /* no read-after-write (RAW) */
+#define SD_IO_RW_RAW		1   /* read-after-write (RAW) */
+#define SD_IO_BYTE_MODE		0   /* Byte Mode */
+#define SD_IO_BLOCK_MODE	1   /* Block Mode */
+#define SD_IO_FIXED_ADDRESS	0   /* Fixed Address */
+#define SD_IO_INCREMENT_ADDRESS	1   /* Increment Address */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+	 (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+	 (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
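+/* Illustrative use (sketch only, not part of the original header; 'addr' and
+ * 'nblocks' are supplied by the caller):
+ *
+ *	// CMD52: read one byte from function 1, register 'addr'
+ *	uint32 arg52 = SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_READ, SD_IO_RW_NORMAL,
+ *	                                     SDIO_FUNC_1, addr, 0);
+ *
+ *	// CMD53: block-mode read of 'nblocks' blocks from function 2, with an
+ *	// incrementing address
+ *	uint32 arg53 = SDIO_IO_RW_EXTENDED_ARG(SD_IO_OP_READ, SD_IO_BLOCK_MODE,
+ *	                                       SDIO_FUNC_2, addr,
+ *	                                       SD_IO_INCREMENT_ADDRESS, nblocks);
+ */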
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE			0
+#define SD_RSP_NO_1			1
+#define SD_RSP_NO_2			2
+#define SD_RSP_NO_3			3
+#define SD_RSP_NO_4			4
+#define SD_RSP_NO_5			5
+#define SD_RSP_NO_6			6
+
+	/* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR	0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND	0x4000
+#define SD_RSP_MR6_ERROR		0x2000
+
+	/* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT			0x80
+#define SD_RSP_MR1_PARAMETER_ERROR	0x40
+#define SD_RSP_MR1_RFU5			0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR	0x10
+#define SD_RSP_MR1_COM_CRC_ERROR	0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND	0x04
+#define SD_RSP_MR1_RFU1			0x02
+#define SD_RSP_MR1_IDLE_STATE		0x01
+
+	/* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR		0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND	0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1	0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0	0x10
+#define SD_RSP_R5_ERROR			0x08
+#define SD_RSP_R5_RFU			0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR	0x02
+#define SD_RSP_R5_OUT_OF_RANGE		0x01
+
+#define SD_RSP_R5_ERRBITS		0xCB
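+/* Illustrative check (not part of the original header): SD_RSP_R5_ERRBITS is
+ * the OR of the five R5 error flags above (0x80 | 0x40 | 0x08 | 0x02 | 0x01),
+ * so a CMD52/CMD53 response can be screened in a single test:
+ *
+ *	bool r5_ok = ((r5_flags & SD_RSP_R5_ERRBITS) == 0);
+ */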
+
+
+/* ------------------------------------------------
+ *  SDIO Commands and responses
+ *
+ *  I/O only commands are:
+ *      CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0		0
+#define SDIOH_CMD_3		3
+#define SDIOH_CMD_5		5
+#define SDIOH_CMD_7		7
+#define SDIOH_CMD_15		15
+#define SDIOH_CMD_52		52
+#define SDIOH_CMD_53		53
+#define SDIOH_CMD_59		59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE		0
+#define SDIOH_RSP_R1		1
+#define SDIOH_RSP_R2		2
+#define SDIOH_RSP_R3		3
+#define SDIOH_RSP_R4		4
+#define SDIOH_RSP_R5		5
+#define SDIOH_RSP_R6		6
+
+/*
+ *  SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS	0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * 	CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M		BITFIELD_MASK(24)
+#define CMD5_OCR_S		0
+
+#define CMD7_RCA_M		BITFIELD_MASK(16)
+#define CMD7_RCA_S		16
+
+#define CMD_15_RCA_M		BITFIELD_MASK(16)
+#define CMD_15_RCA_S		16
+
+#define CMD52_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]    - Write Data/Stuff bits of CMD52
+						   */
+#define CMD52_DATA_S		0
+#define CMD52_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD52_REG_ADDR_S	9
+#define CMD52_RAW_M		BITFIELD_MASK(1)  /* Bit  27       - Read after Write flag */
+#define CMD52_RAW_S		27
+#define CMD52_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD52_FUNCTION_S	28
+#define CMD52_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD52_RW_FLAG_S		31
+
+
+#define CMD53_BYTE_BLK_CNT_M	BITFIELD_MASK(9) /* Bits [8:0]     - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S	0
+#define CMD53_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD53_REG_ADDR_S	9
+#define CMD53_OP_CODE_M		BITFIELD_MASK(1)  /* Bit  26       - R/W Operation Code */
+#define CMD53_OP_CODE_S		26
+#define CMD53_BLK_MODE_M	BITFIELD_MASK(1)  /* Bit  27       - Block Mode */
+#define CMD53_BLK_MODE_S	27
+#define CMD53_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD53_FUNCTION_S	28
+#define CMD53_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD53_RW_FLAG_S		31
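+/* Illustrative field extraction (sketch; assumes BITFIELD_MASK(w), defined in
+ * a shared Broadcom header, expands to the low-order w-bit mask). Given a
+ * previously built CMD53 argument 'arg':
+ *
+ *	uint32 count = (arg >> CMD53_BYTE_BLK_CNT_S) & CMD53_BYTE_BLK_CNT_M;
+ *	uint32 func  = (arg >> CMD53_FUNCTION_S) & CMD53_FUNCTION_M;
+ */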
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ *  -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M		BITFIELD_MASK(24) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S		0
+#define RSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S		24
+#define RSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  27      - Memory present */
+#define RSP4_MEM_PRESENT_S	27
+#define RSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S	28
+#define RSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  31      - SDIO card ready */
+#define RSP4_CARD_READY_S	31
+
+#define RSP6_STATUS_M		BITFIELD_MASK(16) /* Bits [15:0]  - Card status bits [19,22,23,12:0]
+						   */
+#define RSP6_STATUS_S		0
+#define RSP6_IO_RCA_M		BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S		16
+
+#define RSP1_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3       - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S	3
+#define RSP1_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5       - Card expects ACMD */
+#define RSP1_APP_CMD_S		5
+#define RSP1_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8       - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S	8
+#define RSP1_CURR_STATE_M	BITFIELD_MASK(4)  /* Bits [12:9] - State of card
+						   * when Cmd was received
+						   */
+#define RSP1_CURR_STATE_S	9
+#define RSP1_EARSE_RESET_M	BITFIELD_MASK(1)  /* Bit 13   - Erase seq cleared */
+#define RSP1_EARSE_RESET_S	13
+#define RSP1_CARD_ECC_DISABLE_M	BITFIELD_MASK(1)  /* Bit 14   - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S	14
+#define RSP1_WP_ERASE_SKIP_M	BITFIELD_MASK(1)  /* Bit 15   - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S	15
+#define RSP1_CID_CSD_OVERW_M	BITFIELD_MASK(1)  /* Bit 16   - Illegal write to CID or R/O bits
+						   * of CSD
+						   */
+#define RSP1_CID_CSD_OVERW_S	16
+#define RSP1_ERROR_M		BITFIELD_MASK(1)  /* Bit 19   - General/Unknown error */
+#define RSP1_ERROR_S		19
+#define RSP1_CC_ERROR_M		BITFIELD_MASK(1)  /* Bit 20   - Internal Card Control error */
+#define RSP1_CC_ERROR_S		20
+#define RSP1_CARD_ECC_FAILED_M	BITFIELD_MASK(1)  /* Bit 21   - Card internal ECC failed
+						   * to correct data
+						   */
+#define RSP1_CARD_ECC_FAILED_S	21
+#define RSP1_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit 22   - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S	22
+#define RSP1_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 23   - CRC check of previous command failed
+						   */
+#define RSP1_COM_CRC_ERROR_S	23
+#define RSP1_LOCK_UNLOCK_FAIL_M	BITFIELD_MASK(1)  /* Bit 24   - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S	24
+#define RSP1_CARD_LOCKED_M	BITFIELD_MASK(1)  /* Bit 25   - Card locked by the host */
+#define RSP1_CARD_LOCKED_S	25
+#define RSP1_WP_VIOLATION_M	BITFIELD_MASK(1)  /* Bit 26   - Attempt to program
+						   * write-protected blocks
+						   */
+#define RSP1_WP_VIOLATION_S	26
+#define RSP1_ERASE_PARAM_M	BITFIELD_MASK(1)  /* Bit 27   - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S	27
+#define RSP1_ERASE_SEQ_ERR_M	BITFIELD_MASK(1)  /* Bit 28   - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S	28
+#define RSP1_BLK_LEN_ERR_M	BITFIELD_MASK(1)  /* Bit 29   - Block length error */
+#define RSP1_BLK_LEN_ERR_S	29
+#define RSP1_ADDR_ERR_M		BITFIELD_MASK(1)  /* Bit 30   - Misaligned address */
+#define RSP1_ADDR_ERR_S		30
+#define RSP1_OUT_OF_RANGE_M	BITFIELD_MASK(1)  /* Bit 31   - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S	31
+
+
+#define RSP5_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]   - data */
+#define RSP5_DATA_S		0
+#define RSP5_FLAGS_M		BITFIELD_MASK(8)  /* Bits [15:8]  - Rsp flags */
+#define RSP5_FLAGS_S		8
+#define RSP5_STUFF_M		BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S		16
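+/* Illustrative use (sketch): pulling the read data and response flags out of a
+ * 32-bit R5 (CMD52/CMD53) response word 'rsp':
+ *
+ *	uint8 data  = (rsp >> RSP5_DATA_S) & RSP5_DATA_M;
+ *	uint8 flags = (rsp >> RSP5_FLAGS_S) & RSP5_FLAGS_M;
+ */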
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M	BITFIELD_MASK(16) /* Bits [15:0]    - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S	0
+#define SPIRSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [18:16]   - Stuff bits */
+#define SPIRSP4_STUFF_S		16
+#define SPIRSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  19        - Memory present */
+#define SPIRSP4_MEM_PRESENT_S	19
+#define SPIRSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [22:20]   - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S	20
+#define SPIRSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  23        - SDIO card ready */
+#define SPIRSP4_CARD_READY_S	23
+#define SPIRSP4_IDLE_STATE_M	BITFIELD_MASK(1)  /* Bit  24        - idle state */
+#define SPIRSP4_IDLE_STATE_S	24
+#define SPIRSP4_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S	26
+#define SPIRSP4_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S	27
+#define SPIRSP4_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP4_FUNC_NUM_ERROR_S	28
+#define SPIRSP4_PARAM_ERROR_M	BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S	30
+#define SPIRSP4_START_BIT_M	BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP4_START_BIT_S	31
+
+#define SPIRSP5_DATA_M			BITFIELD_MASK(8)  /* Bits [23:16]   - R/W Data */
+#define SPIRSP5_DATA_S			16
+#define SPIRSP5_IDLE_STATE_M		BITFIELD_MASK(1)  /* Bit  24        - Idle state */
+#define SPIRSP5_IDLE_STATE_S		24
+#define SPIRSP5_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S		26
+#define SPIRSP5_COM_CRC_ERROR_M		BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S		27
+#define SPIRSP5_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP5_FUNC_NUM_ERROR_S	28
+#define SPIRSP5_PARAM_ERROR_M		BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S		30
+#define SPIRSP5_START_BIT_M		BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP5_START_BIT_S		31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3	- Authentication seq error
+							   */
+#define RSP6STAT_AKE_SEQ_ERROR_S	3
+#define RSP6STAT_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5	- Card expects ACMD */
+#define RSP6STAT_APP_CMD_S		5
+#define RSP6STAT_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8	- Ready for data
+							   * (buff empty)
+							   */
+#define RSP6STAT_READY_FOR_DATA_S	8
+#define RSP6STAT_CURR_STATE_M		BITFIELD_MASK(4)  /* Bits [12:9] - Card state at
+							   * Cmd reception
+							   */
+#define RSP6STAT_CURR_STATE_S		9
+#define RSP6STAT_ERROR_M		BITFIELD_MASK(1)  /* Bit 13  - General/Unknown error
+							   * (card status bit 19)
+							   */
+#define RSP6STAT_ERROR_S		13
+#define RSP6STAT_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit 14  - Illegal cmd for card
+							   * state (card status bit 22)
+							   */
+#define RSP6STAT_ILLEGAL_CMD_S		14
+#define RSP6STAT_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 15  - CRC of previous command
+							   * failed (card status bit 23)
+							   */
+#define RSP6STAT_COM_CRC_ERROR_S	15
+
+#define SDIOH_XFER_TYPE_READ    SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE   SD_IO_OP_WRITE
+
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcm4329/include/sdioh.h b/drivers/net/wireless/bcm4329/include/sdioh.h
new file mode 100644
index 0000000..8123452
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sdioh.h
@@ -0,0 +1,299 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h,v 13.13.18.1.16.3 2009/12/08 22:34:21 Exp $
+ */
+
+#ifndef	_SDIOH_H
+#define	_SDIOH_H
+
+#define SD_SysAddr			0x000
+#define SD_BlockSize			0x004
+#define SD_BlockCount 			0x006
+#define SD_Arg0				0x008
+#define SD_Arg1 			0x00A
+#define SD_TransferMode			0x00C
+#define SD_Command 			0x00E
+#define SD_Response0			0x010
+#define SD_Response1 			0x012
+#define SD_Response2			0x014
+#define SD_Response3 			0x016
+#define SD_Response4			0x018
+#define SD_Response5 			0x01A
+#define SD_Response6			0x01C
+#define SD_Response7 			0x01E
+#define SD_BufferDataPort0		0x020
+#define SD_BufferDataPort1 		0x022
+#define SD_PresentState			0x024
+#define SD_HostCntrl			0x028
+#define SD_PwrCntrl			0x029
+#define SD_BlockGapCntrl 		0x02A
+#define SD_WakeupCntrl 			0x02B
+#define SD_ClockCntrl			0x02C
+#define SD_TimeoutCntrl 		0x02E
+#define SD_SoftwareReset		0x02F
+#define SD_IntrStatus			0x030
+#define SD_ErrorIntrStatus 		0x032
+#define SD_IntrStatusEnable		0x034
+#define SD_ErrorIntrStatusEnable 	0x036
+#define SD_IntrSignalEnable		0x038
+#define SD_ErrorIntrSignalEnable 	0x03A
+#define SD_CMD12ErrorStatus		0x03C
+#define SD_Capabilities			0x040
+#define SD_Capabilities_Reserved	0x044
+#define SD_MaxCurCap			0x048
+#define SD_MaxCurCap_Reserved		0x04C
+#define SD_ADMA_SysAddr			0x58
+#define SD_SlotInterruptStatus		0x0FC
+#define SD_HostControllerVersion 	0x0FE
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo	0x40
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M 	BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 	0
+#define CAP_TO_CLKUNIT_M  	BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 	7
+#define CAP_BASECLK_M 		BITFIELD_MASK(6)
+#define CAP_BASECLK_S 		8
+#define CAP_MAXBLOCK_M 		BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S		16
+#define CAP_ADMA2_M		BITFIELD_MASK(1)
+#define CAP_ADMA2_S		19
+#define CAP_ADMA1_M		BITFIELD_MASK(1)
+#define CAP_ADMA1_S		20
+#define CAP_HIGHSPEED_M		BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S		21
+#define CAP_DMA_M		BITFIELD_MASK(1)
+#define CAP_DMA_S		22
+#define CAP_SUSPEND_M		BITFIELD_MASK(1)
+#define CAP_SUSPEND_S		23
+#define CAP_VOLT_3_3_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S		24
+#define CAP_VOLT_3_0_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S		25
+#define CAP_VOLT_1_8_M		BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S		26
+#define CAP_64BIT_HOST_M	BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S	28
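+/* Illustrative use (sketch, not part of the original header): per the standard
+ * host controller spec the base-clock field is expressed in MHz, so given the
+ * 32-bit capabilities value 'caps':
+ *
+ *	uint32 base_clk_mhz = (caps >> CAP_BASECLK_S) & CAP_BASECLK_M;
+ */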
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S		0
+#define CAP_CURR_3_0_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S		8
+#define CAP_CURR_1_8_M		BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S		16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M		BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S		0
+#define BLKSZ_BNDRY_M		BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S		12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes  */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M   	BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S	0
+#define XFER_BLK_COUNT_EN_M 	BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S	1
+#define XFER_CMD_12_EN_M    	BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 	2
+#define XFER_DATA_DIRECTION_M	BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S	4
+#define XFER_MULTI_BLOCK_M	BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S	5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 		0
+#define RESP_TYPE_136  		1
+#define RESP_TYPE_48   		2
+#define RESP_TYPE_48_BUSY	3
+/* type field */
+#define CMD_TYPE_NORMAL		0
+#define CMD_TYPE_SUSPEND	1
+#define CMD_TYPE_RESUME		2
+#define CMD_TYPE_ABORT		3
+
+#define CMD_RESP_TYPE_M		BITFIELD_MASK(2)	/* Bits [0-1] 	- Response type */
+#define CMD_RESP_TYPE_S		0
+#define CMD_CRC_EN_M		BITFIELD_MASK(1)	/* Bit 3 	- CRC enable */
+#define CMD_CRC_EN_S		3
+#define CMD_INDEX_EN_M		BITFIELD_MASK(1)	/* Bit 4 	- Enable index checking */
+#define CMD_INDEX_EN_S		4
+#define CMD_DATA_EN_M		BITFIELD_MASK(1)	/* Bit 5 	- Using DAT line */
+#define CMD_DATA_EN_S		5
+#define CMD_TYPE_M		BITFIELD_MASK(2)	/* Bit [6-7] 	- Normal, abort, resume, etc
+							 */
+#define CMD_TYPE_S		6
+#define CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [8-13] 	- Command number */
+#define CMD_INDEX_S		8
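+/* Illustrative composition (sketch only; real drivers typically also enable
+ * CRC and index checking, which is omitted here): issuing CMD52 (48-bit R5
+ * response, normal command type) through this register could look like
+ *
+ *	uint16 cmd_reg = ((52 & CMD_INDEX_M) << CMD_INDEX_S) |
+ *	                 ((RESP_TYPE_48 & CMD_RESP_TYPE_M) << CMD_RESP_TYPE_S) |
+ *	                 ((CMD_TYPE_NORMAL & CMD_TYPE_M) << CMD_TYPE_S);
+ */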
+
+/* SD_BufferDataPort0	: Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 	: Offset 0x022, size = 2 bytes */
+/* SD_PresentState	: Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 0	May use CMD */
+#define PRES_CMD_INHIBIT_S	0
+#define PRES_DAT_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 1	May use DAT */
+#define PRES_DAT_INHIBIT_S	1
+#define PRES_DAT_BUSY_M		BITFIELD_MASK(1)	/* Bit 2	DAT is busy */
+#define PRES_DAT_BUSY_S		2
+#define PRES_PRESENT_RSVD_M	BITFIELD_MASK(5)	/* Bit [3-7]	rsvd */
+#define PRES_PRESENT_RSVD_S	3
+#define PRES_WRITE_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 8	Write is active */
+#define PRES_WRITE_ACTIVE_S	8
+#define PRES_READ_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 9	Read is active */
+#define PRES_READ_ACTIVE_S	9
+#define PRES_WRITE_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 10	Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S	10
+#define PRES_READ_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 11	Read buf data avail */
+#define PRES_READ_DATA_RDY_S	11
+#define PRES_CARD_PRESENT_M	BITFIELD_MASK(1)	/* Bit 16	Card present - debounced */
+#define PRES_CARD_PRESENT_S	16
+#define PRES_CARD_STABLE_M	BITFIELD_MASK(1)	/* Bit 17	Debugging */
+#define PRES_CARD_STABLE_S	17
+#define PRES_CARD_PRESENT_RAW_M	BITFIELD_MASK(1)	/* Bit 18	Not debounced */
+#define PRES_CARD_PRESENT_RAW_S	18
+#define PRES_WRITE_ENABLED_M	BITFIELD_MASK(1)	/* Bit 19	Write protected? */
+#define PRES_WRITE_ENABLED_S	19
+#define PRES_DAT_SIGNAL_M	BITFIELD_MASK(4)	/* Bit [20-23]	Debugging */
+#define PRES_DAT_SIGNAL_S	20
+#define PRES_CMD_SIGNAL_M	BITFIELD_MASK(1)	/* Bit 24	Debugging */
+#define PRES_CMD_SIGNAL_S	24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M		BITFIELD_MASK(1)	/* Bit 0	LED On/Off */
+#define HOST_LED_S		0
+#define HOST_DATA_WIDTH_M	BITFIELD_MASK(1)	/* Bit 1	4 bit enable */
+#define HOST_DATA_WIDTH_S	1
+#define HOST_HI_SPEED_EN_M	BITFIELD_MASK(1)	/* Bit 2	High speed vs low speed */
+#define HOST_HI_SPEED_EN_S	2
+#define HOST_DMA_SEL_M		BITFIELD_MASK(2)	/* Bits [4:3]	DMA Select */
+#define HOST_DMA_SEL_S		3
+
+/* misc defines */
+#define SD1_MODE 		0x1	/* SD Host Cntrlr Spec */
+#define SD4_MODE 		0x2	/* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M		BITFIELD_MASK(1)	/* Bit 0	Power the bus */
+#define PWR_BUS_EN_S		0
+#define PWR_VOLTS_M		BITFIELD_MASK(3)	/* Bit [1-3]	Voltage Select */
+#define PWR_VOLTS_S		1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M		BITFIELD_MASK(1)	/* Bit 0	Reset All */
+#define SW_RESET_ALL_S		0
+#define SW_RESET_CMD_M		BITFIELD_MASK(1)	/* Bit 1	CMD Line Reset */
+#define SW_RESET_CMD_S		1
+#define SW_RESET_DAT_M		BITFIELD_MASK(1)	/* Bit 2	DAT Line Reset */
+#define SW_RESET_DAT_S		2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M		BITFIELD_MASK(1)	/* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S		0
+#define INTSTAT_XFER_COMPLETE_M		BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S		1
+#define INTSTAT_BLOCK_GAP_EVENT_M	BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S	2
+#define INTSTAT_DMA_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S		3
+#define INTSTAT_BUF_WRITE_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S	4
+#define INTSTAT_BUF_READ_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S	5
+#define INTSTAT_CARD_INSERTION_M	BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S	6
+#define INTSTAT_CARD_REMOVAL_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S		7
+#define INTSTAT_CARD_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S		8
+#define INTSTAT_ERROR_INT_M		BITFIELD_MASK(1)	/* Bit 15 */
+#define INTSTAT_ERROR_INT_S		15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S		0
+#define ERRINT_CMD_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S		1
+#define ERRINT_CMD_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S		2
+#define ERRINT_CMD_INDEX_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S		3
+#define ERRINT_DATA_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S		4
+#define ERRINT_DATA_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S		5
+#define ERRINT_DATA_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S		6
+#define ERRINT_CURRENT_LIMIT_M		BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S		7
+#define ERRINT_AUTO_CMD12_M		BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S		8
+#define ERRINT_VENDOR_M			BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S			12
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT		0x0001
+#define ERRINT_CMD_CRC_BIT		0x0002
+#define ERRINT_CMD_ENDBIT_BIT		0x0004
+#define ERRINT_CMD_INDEX_BIT		0x0008
+#define ERRINT_DATA_TIMEOUT_BIT		0x0010
+#define ERRINT_DATA_CRC_BIT		0x0020
+#define ERRINT_DATA_ENDBIT_BIT		0x0040
+#define ERRINT_CURRENT_LIMIT_BIT	0x0080
+#define ERRINT_AUTO_CMD12_BIT		0x0100
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS		(ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+				 ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS	(ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+				 ERRINT_DATA_ENDBIT_BIT)
+#define ERRINT_TRANSFER_ERRS	(ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
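+/* Illustrative use (sketch): the combined masks let an error-interrupt handler
+ * decide which line needs a software reset, given the 16-bit error status
+ * 'err_status':
+ *
+ *	bool reset_cmd = ((err_status & ERRINT_CMD_ERRS) != 0);  // -> SW_RESET_CMD_M
+ *	bool reset_dat = ((err_status & ERRINT_DATA_ERRS) != 0); // -> SW_RESET_DAT_M
+ */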
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl	: Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl 	: Offset 0x02E , size = bytes */
+/* SD_IntrStatus	: Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus 	: Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable	: Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable	: Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus	: Offset 0x03C , size = bytes */
+/* SD_Capabilities	: Offset 0x040 , size = bytes */
+/* SD_MaxCurCap		: Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcm4329/include/sdiovar.h b/drivers/net/wireless/bcm4329/include/sdiovar.h
new file mode 100644
index 0000000..0179d4c
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h,v 13.5.14.2.16.2 2009/12/08 22:34:21 Exp $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+	int func;
+	int offset;
+	int value;
+} sdreg_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL		0x0001	/* Error */
+#define SDH_TRACE_VAL		0x0002	/* Trace */
+#define SDH_INFO_VAL		0x0004	/* Info */
+#define SDH_DEBUG_VAL		0x0008	/* Debug */
+#define SDH_DATA_VAL		0x0010	/* Data */
+#define SDH_CTRL_VAL		0x0020	/* Control Regs */
+#define SDH_LOG_VAL		0x0040	/* Enable bcmlog */
+#define SDH_DMA_VAL		0x0080	/* DMA */
+
+#define NUM_PREV_TRANSACTIONS	16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/siutils.h b/drivers/net/wireless/bcm4329/include/siutils.h
new file mode 100644
index 0000000..cb9f140
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/siutils.h
@@ -0,0 +1,235 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h,v 13.197.4.2.4.3.8.16 2010/06/23 21:36:05 Exp $
+ */
+
+
+#ifndef	_siutils_h_
+#define	_siutils_h_
+
+
+struct si_pub {
+	uint	socitype;		
+
+	uint	bustype;		
+	uint	buscoretype;		
+	uint	buscorerev;		
+	uint	buscoreidx;		
+	int	ccrev;			
+	uint32	cccaps;			
+	int	pmurev;			
+	uint32	pmucaps;		
+	uint	boardtype;		
+	uint	boardvendor;		
+	uint	boardflags;		
+	uint	chip;			
+	uint	chiprev;		
+	uint	chippkg;		
+	uint32	chipst;			
+	bool	issim;			
+	uint    socirev;		
+	bool	pci_pr32414;
+};
+
+#if defined(WLC_HIGH) && !defined(WLC_LOW)
+typedef struct si_pub si_t;
+#else
+typedef const struct si_pub si_t;
+#endif
+
+
+#define	SI_OSH		NULL	
+
+
+#define	XTAL			0x1	
+#define	PLL			0x2	
+
+
+#define	CLK_FAST		0	
+#define	CLK_DYNAMIC		2	
+
+
+#define GPIO_DRV_PRIORITY	0	
+#define GPIO_APP_PRIORITY	1	
+#define GPIO_HI_PRIORITY	2	
+
+
+#define GPIO_PULLUP		0
+#define GPIO_PULLDN		1
+
+
+#define GPIO_REGEVT		0	
+#define GPIO_REGEVT_INTMSK	1	
+#define GPIO_REGEVT_INTPOL	2	
+
+
+#define SI_DEVPATH_BUFSZ	16	
+
+
+#define	SI_DOATTACH	1
+#define SI_PCIDOWN	2
+#define SI_PCIUP	3
+
+#define	ISSIM_ENAB(sih)	0
+
+
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih)	(BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih)	((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih)		(0)
+#define CCPLL_ENAB(sih)		(0)
+#else
+#define CCCTL_ENAB(sih)		((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih)		((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+
+
+
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void *si_coreregs(si_t *sih);
+extern void si_write_wrapperreg(si_t *sih, uint32 offset, uint32 val);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_tofixup(si_t *sih);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern uint32 si_clock(si_t *sih);
+extern void si_clock_pmu_spuravoid(si_t *sih, bool spuravoid);
+extern uint32 si_alp_clock(si_t *sih);
+extern uint32 si_ilp_clock(si_t *sih);
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+	void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern bool si_backplane64(si_t *sih);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+
+
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+
+extern bool si_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+extern void si_sdio_init(si_t *sih);
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci_init(sih) (0)
+#define si_eci_notify_bt(sih, type, val, interrupt)  (0)
+
+
+
+extern int si_devpath(si_t *sih, char *path, int size);
+
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pcie_war_ovr_disable(si_t *sih);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+
+
+
+
+
+
+
+#endif	/* _siutils_h_ */
diff --git a/drivers/net/wireless/bcm4329/include/spid.h b/drivers/net/wireless/bcm4329/include/spid.h
new file mode 100644
index 0000000..c740296
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/spid.h
@@ -0,0 +1,153 @@
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 2010, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: spid.h,v 1.7.10.1.16.3 2009/04/09 19:23:14 Exp $
+ */
+
+#ifndef	_SPI_H
+#define	_SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+	uint8	config;			/* 0x00, len, endian, clock, speed, polarity, wakeup */
+	uint8	response_delay;		/* 0x01, read response delay in bytes (corerev < 3) */
+	uint8	status_enable;		/* 0x02, status-enable, intr with status, response_delay
+					 * function selection, command/data error check
+					 */
+	uint8	reset_bp;		/* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+	uint16	intr_reg;		/* 0x04, Intr status register */
+	uint16	intr_en_reg;		/* 0x06, Intr mask register */
+	uint32	status_reg;		/* 0x08, RO, Status bits of last spi transfer */
+	uint16	f1_info_reg;		/* 0x0c, RO, enabled, ready for data transfer, blocksize */
+	uint16	f2_info_reg;		/* 0x0e, RO, enabled, ready for data transfer, blocksize */
+	uint16	f3_info_reg;		/* 0x10, RO, enabled, ready for data transfer, blocksize */
+	uint32	test_read;		/* 0x14, RO 0xfeedbead signature */
+	uint32	test_rw;		/* 0x18, RW */
+	uint8	resp_delay_f0;		/* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+	uint8	resp_delay_f1;		/* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+	uint8	resp_delay_f2;		/* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+	uint8	resp_delay_f3;		/* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
+
+/* SPI device register offsets */
+#define SPID_CONFIG			0x00
+#define SPID_RESPONSE_DELAY		0x01
+#define SPID_STATUS_ENABLE		0x02
+#define SPID_RESET_BP			0x03	/* (corerev >= 1) */
+#define SPID_INTR_REG			0x04	/* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG		0x06	/* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG			0x08	/* 32 bits */
+#define SPID_F1_INFO_REG		0x0C	/* 16 bits */
+#define SPID_F2_INFO_REG		0x0E	/* 16 bits */
+#define SPID_F3_INFO_REG		0x10	/* 16 bits */
+#define SPID_TEST_READ			0x14	/* 32 bits */
+#define SPID_TEST_RW			0x18	/* 32 bits */
+#define SPID_RESP_DELAY_F0		0x1c	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1		0x1d	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2		0x1e	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3		0x1f	/* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32	0x1	/* 0/1 16/32 bit word length */
+#define ENDIAN_BIG	0x2	/* 0/1 Little/Big Endian */
+#define CLOCK_PHASE	0x4	/* 0/1 clock phase delay */
+#define CLOCK_POLARITY	0x8	/* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE	0x10	/* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY	0x20	/* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP		0x80	/* 0/1 Wake-up command from Host to WLAN */
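+/* Illustrative composition (sketch only; which bits are appropriate depends on
+ * the host platform): a 32-bit word length, little-endian, high-speed gSPI
+ * configuration would be
+ *
+ *	uint8 cfg = WORD_LENGTH_32 | HIGH_SPEED_MODE;
+ */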
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK	0xFF	/* Configurable rd response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE		0x1	/* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS	0x2	/* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL		0x4	/* Applicability of resp delay to F1 or all func's read */
+#define DWORD_PKT_LEN_EN	0x8	/* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN		0x20	/* Command error check enable */
+#define DATA_ERR_CHK_EN		0x40	/* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET	0x4	/* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET	0x8	/* enable reset for BT backplane */
+#define RESET_SPI		0x80	/* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE	0x0001	/* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW	0x0002
+#define F2_F3_FIFO_WR_OVERFLOW	0x0004
+#define COMMAND_ERROR		0x0008	/* Cleared by writing 1 */
+#define DATA_ERROR		0x0010	/* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE	0x0020
+#define F3_PACKET_AVAILABLE	0x0040
+#define F1_OVERFLOW		0x0080	/* Due to last write. Backplane has pending write requests */
+#define MISC_INTR0		0x0100
+#define MISC_INTR1		0x0200
+#define MISC_INTR2		0x0400
+#define MISC_INTR3		0x0800
+#define MISC_INTR4		0x1000
+#define F1_INTR			0x2000
+#define F2_INTR			0x4000
+#define F3_INTR			0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE	0x00000001
+#define STATUS_UNDERFLOW		0x00000002
+#define STATUS_OVERFLOW			0x00000004
+#define STATUS_F2_INTR			0x00000008
+#define STATUS_F3_INTR			0x00000010
+#define STATUS_F2_RX_READY		0x00000020
+#define STATUS_F3_RX_READY		0x00000040
+#define STATUS_HOST_CMD_DATA_ERR	0x00000080
+#define STATUS_F2_PKT_AVAILABLE		0x00000100
+#define STATUS_F2_PKT_LEN_MASK		0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT		9
+#define STATUS_F3_PKT_AVAILABLE		0x00100000
+#define STATUS_F3_PKT_LEN_MASK		0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT		21
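+/* Illustrative use (sketch): F2 packet availability and length are both
+ * carried in the 32-bit status word 'status':
+ *
+ *	bool   f2_avail = ((status & STATUS_F2_PKT_AVAILABLE) != 0);
+ *	uint32 f2_len   = (status & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT;
+ */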
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 			0x0001
+#define F1_RDY_FOR_DATA_TRANSFER	0x0002
+#define F1_MAX_PKT_SIZE			0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 			0x0001
+#define F2_RDY_FOR_DATA_TRANSFER	0x0002
+#define F2_MAX_PKT_SIZE			0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 			0x0001
+#define F3_RDY_FOR_DATA_TRANSFER	0x0002
+#define F3_MAX_PKT_SIZE			0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE		0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS		4
+
+#define SPI_MAX_PKT_LEN		(2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0		0
+#define SPI_FUNC_1		1
+#define SPI_FUNC_2		2
+#define SPI_FUNC_3		3
+
+#define WAIT_F2RXFIFORDY	100
+#define WAIT_F2RXFIFORDY_DELAY	20
+
+#endif /* _SPI_H */
diff --git a/drivers/net/wireless/bcm4329/include/trxhdr.h b/drivers/net/wireless/bcm4329/include/trxhdr.h
new file mode 100644
index 0000000..8f5eed9
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/trxhdr.h
@@ -0,0 +1,46 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h,v 13.11.310.1 2008/08/17 12:58:58 Exp $
+ */
+
+#include <typedefs.h>
+
+#define TRX_MAGIC	0x30524448	/* "HDR0" */
+#define TRX_VERSION	1		/* Version 1 */
+#define TRX_MAX_LEN	0x3A0000	/* Max length */
+#define TRX_NO_HEADER	1		/* Do not write TRX header */
+#define TRX_GZ_FILES	0x2     /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_MAX_OFFSET	3		/* Max number of individual files */
+#define TRX_UNCOMP_IMAGE	0x20	/* Trx contains uncompressed rtecdc.bin image */
+
+struct trx_header {
+	uint32 magic;		/* "HDR0" */
+	uint32 len;		/* Length of file including header */
+	uint32 crc32;		/* 32-bit CRC from flag_version to end of file */
+	uint32 flag_version;	/* 0:15 flags, 16:31 version */
+	uint32 offsets[TRX_MAX_OFFSET];	/* Offsets of partitions from start of header */
+};
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
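+/* Illustrative validation (sketch; byte-order handling omitted): a candidate
+ * image buffer 'image' can be sanity-checked against the constants above,
+ * with the version taken from the upper 16 bits of flag_version:
+ *
+ *	const struct trx_header *trx = (const struct trx_header *)image;
+ *	bool valid = (trx->magic == TRX_MAGIC) &&
+ *	             ((trx->flag_version >> 16) == TRX_VERSION) &&
+ *	             (trx->len <= TRX_MAX_LEN);
+ */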
diff --git a/drivers/net/wireless/bcm4329/include/typedefs.h b/drivers/net/wireless/bcm4329/include/typedefs.h
new file mode 100644
index 0000000..4d9dd76
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/typedefs.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h,v 1.85.34.1.2.5 2009/01/27 04:09:40 Exp $
+ */
+
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+
+
+#include "site_typedefs.h"
+
+#else
+
+
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE	false
+#endif
+#ifndef TRUE
+#define TRUE	true
+#endif
+
+#else	
+
+
+#endif	
+
+#if defined(__x86_64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+#if defined(TARGETOS_nucleus)
+
+#include <stddef.h>
+
+
+#define TYPEDEF_FLOAT_T
+#endif   
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+#ifdef __DJGPP__
+typedef long unsigned int size_t;
+#endif 
+
+
+
+
+
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif 
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif	
+#endif	
+
+
+
+
+
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif
+
+
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif 
+
+#if !defined(__DJGPP__) && !defined(TARGETOS_nucleus)
+
+
+#if defined(__KERNEL__)
+
+#include <linux/types.h>	
+
+#else
+
+
+#include <sys/types.h>
+
+#endif 
+
+#endif 
+
+
+
+
+#define USE_TYPEDEF_DEFAULTS
+
+#endif 
+
+
+
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef	 unsigned char	bool;
+#endif
+
+
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char	uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short	ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int	uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long	ulong;
+#endif
+
+
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char	uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short	uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int	uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int	uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char	int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short	int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int	int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT32
+typedef float		float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double		float64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else 
+typedef float64 float_t;
+#endif
+
+#endif 
+
+
+
+#ifndef FALSE
+#define FALSE	0
+#endif
+
+#ifndef TRUE
+#define TRUE	1  
+#endif
+
+#ifndef NULL
+#define	NULL	0
+#endif
+
+#ifndef OFF
+#define	OFF	0
+#endif
+
+#ifndef ON
+#define	ON	1  
+#endif
+
+#define	AUTO	(-1) 
+
+
+
+#ifndef PTRSZ
+#define	PTRSZ	sizeof(char*)
+#endif
+
+
+
+#if defined(__GNUC__)
+	#define BWL_COMPILER_GNU
+#elif defined(__CC_ARM)
+	#define BWL_COMPILER_ARMCC
+#else
+	#error "Unknown compiler!"
+#endif 
+
+
+#ifndef INLINE
+	#if defined(BWL_COMPILER_MICROSOFT)
+		#define INLINE __inline
+	#elif defined(BWL_COMPILER_GNU)
+		#define INLINE __inline__
+	#elif defined(BWL_COMPILER_ARMCC)
+		#define INLINE	__inline
+	#else
+		#define INLINE
+	#endif 
+#endif 
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif 
+
+
+#define UNUSED_PARAMETER(x) (void)(x)
+
+
+#include <bcmdefs.h>
+
+#endif 
diff --git a/drivers/net/wireless/bcm4329/include/wlioctl.h b/drivers/net/wireless/bcm4329/include/wlioctl.h
new file mode 100644
index 0000000..00c61f1
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/include/wlioctl.h
@@ -0,0 +1,1673 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h,v 1.601.4.15.2.14.2.62.4.3 2011/02/09 23:31:02 Exp $
+ */
+
+
+#ifndef _wlioctl_h_
+#define	_wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <bcmwifi.h>
+
+
+
+#define ACTION_FRAME_SIZE 1040
+
+typedef struct wl_action_frame {
+	struct ether_addr	da;
+	uint16				len;
+	uint32				packetId;
+	uint8				data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+#define RWL_ACTION_WIFI_CATEGORY	127  
+#define RWL_WIFI_OUI_BYTE1		0x90 
+#define RWL_WIFI_OUI_BYTE2		0x4C
+#define RWL_WIFI_OUI_BYTE3		0x0F
+#define RWL_WIFI_ACTION_FRAME_SIZE	sizeof(struct dot11_action_wifi_vendor_specific)
+#define RWL_WIFI_DEFAULT                0x00
+#define RWL_WIFI_FIND_MY_PEER		0x09 
+#define RWL_WIFI_FOUND_PEER		0x0A 
+#define RWL_ACTION_WIFI_FRAG_TYPE	0x55 
+
+typedef struct ssid_info
+{
+	uint8		ssid_len;		
+	uint8		ssid[32];		
+} ssid_info_t;
+
+typedef struct cnt_rx
+{
+	uint32 cnt_rxundec;
+	uint32 cnt_rxframe;
+} cnt_rx_t;
+
+
+
+#define RWL_REF_MAC_ADDRESS_OFFSET	17
+#define RWL_DUT_MAC_ADDRESS_OFFSET	23
+#define RWL_WIFI_CLIENT_CHANNEL_OFFSET	50
+#define RWL_WIFI_SERVER_CHANNEL_OFFSET	51
+
+
+
+
+
+#define	WL_BSS_INFO_VERSION	108		
+
+
+typedef struct wl_bss_info {
+	uint32		version;		
+	uint32		length;			
+	struct ether_addr BSSID;
+	uint16		beacon_period;		
+	uint16		capability;		
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			
+		uint8	rates[16];		
+	} rateset;				
+	chanspec_t	chanspec;		
+	uint16		atim_window;		
+	uint8		dtim_period;		
+	int16		RSSI;			
+	int8		phy_noise;		
+
+	uint8		n_cap;			
+	uint32		nbss_cap;		
+	uint8		ctl_ch;			
+	uint32		reserved32[1];		
+	uint8		flags;			
+	uint8		reserved[3];		
+	uint8		basic_mcs[MCSSET_LEN];	
+
+	uint16		ie_offset;		
+	uint32		ie_length;		
+	
+	
+} wl_bss_info_t;
+
+typedef struct wlc_ssid {
+	uint32		SSID_len;
+	uchar		SSID[32];
+} wlc_ssid_t;
+
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY   2
+
+
+#define WL_SCANFLAGS_PASSIVE 0x01
+#define WL_SCANFLAGS_PROHIBITED	0x04
+
+typedef struct wl_scan_params {
+	wlc_ssid_t ssid;		
+	struct ether_addr bssid;	
+	int8 bss_type;			
+	int8 scan_type;		
+	int32 nprobes;			
+	int32 active_time;		
+	int32 passive_time;		
+	int32 home_time;		
+	int32 channel_num;		
+	uint16 channel_list[1];		
+} wl_scan_params_t;
+
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+
+
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START      1
+#define WL_SCAN_ACTION_CONTINUE   2
+#define WL_SCAN_ACTION_ABORT      3
+
+#define ISCAN_REQ_VERSION 1
+
+
+typedef struct wl_iscan_params {
+	uint32 version;
+	uint16 action;
+	uint16 scan_duration;
+	wl_scan_params_t params;
+} wl_iscan_params_t;
+
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
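+/* Illustrative walk (sketch; dongle-to-host byte-order conversion omitted):
+ * wl_bss_info_t entries are variable length, so iteration advances by each
+ * entry's own 'length' field rather than by sizeof(wl_bss_info_t):
+ *
+ *	wl_bss_info_t *bi = results->bss_info;
+ *	uint32 i;
+ *	for (i = 0; i < results->count; i++) {
+ *		// ... consume *bi ...
+ *		bi = (wl_bss_info_t *)((uint8 *)bi + bi->length);
+ *	}
+ */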
+
+#define WL_SCAN_RESULTS_FIXED_SIZE 12
+
+
+#define WL_SCAN_RESULTS_SUCCESS	0
+#define WL_SCAN_RESULTS_PARTIAL	1
+#define WL_SCAN_RESULTS_PENDING	2
+#define WL_SCAN_RESULTS_ABORTED	3
+#define WL_SCAN_RESULTS_NO_MEM	4
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+	uint32 version;
+	uint16 action;
+	uint16 sync_id;
+	wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+	uint32 buflen;
+	uint32 version;
+	uint16 sync_id;
+	uint16 bss_count;
+	wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+
+typedef struct wl_iscan_results {
+	uint32 status;
+	wl_scan_results_t results;
+} wl_iscan_results_t;
+
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+	(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+#define WL_NUMRATES		16	
+typedef struct wl_rateset {
+	uint32	count;			
+	uint8	rates[WL_NUMRATES];	
+} wl_rateset_t;
+
+
+typedef struct wl_uint32_list {
+	
+	uint32 count;
+	
+	uint32 element[1];
+} wl_uint32_list_t;
+
+
+typedef struct wl_assoc_params {
+	struct ether_addr bssid;	
+	uint16 bssid_cnt;		
+	int32 chanspec_num;		
+	chanspec_t chanspec_list[1];	
+} wl_assoc_params_t;
+#define WL_ASSOC_PARAMS_FIXED_SIZE 	(sizeof(wl_assoc_params_t) - sizeof(chanspec_t))
+
+
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+
+typedef struct wl_join_params {
+	wlc_ssid_t ssid;
+	wl_assoc_params_t params;	
+} wl_join_params_t;
+#define WL_JOIN_PARAMS_FIXED_SIZE 	(sizeof(wl_join_params_t) - sizeof(chanspec_t))
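+
+/*
+ * Illustrative sketch only (not part of the driver): a join request with an
+ * explicit channel list is sized as WL_JOIN_PARAMS_FIXED_SIZE plus one
+ * chanspec_t per listed channel (the macro already subtracts the single
+ * placeholder element).  Identifiers prefixed ex_ are hypothetical.
+ *
+ *	int ex_nchan = 2;
+ *	int ex_len = WL_JOIN_PARAMS_FIXED_SIZE + ex_nchan * sizeof(chanspec_t);
+ *	wl_join_params_t *ex_jp = (wl_join_params_t *)malloc(ex_len);
+ *
+ *	memset(ex_jp, 0, ex_len);
+ *	ex_jp->ssid.SSID_len = strlen("ex-network");
+ *	memcpy(ex_jp->ssid.SSID, "ex-network", ex_jp->ssid.SSID_len);
+ *	memset(&ex_jp->params.bssid, 0xff, sizeof(ex_jp->params.bssid));
+ *	ex_jp->params.chanspec_num = ex_nchan;
+ *	// ex_jp->params.chanspec_list[0..1] = chanspecs of the target channels
+ */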
+
+#define WLC_CNTRY_BUF_SZ	4		
+
+typedef struct wl_country {
+	char country_abbrev[WLC_CNTRY_BUF_SZ];
+	int32 rev;
+	char ccode[WLC_CNTRY_BUF_SZ];
+} wl_country_t;
+
+typedef enum sup_auth_status {
+	
+	WLC_SUP_DISCONNECTED = 0,
+	WLC_SUP_CONNECTING,
+	WLC_SUP_IDREQUIRED,
+	WLC_SUP_AUTHENTICATING,
+	WLC_SUP_AUTHENTICATED,
+	WLC_SUP_KEYXCHANGE,
+	WLC_SUP_KEYED,
+	WLC_SUP_TIMEOUT,
+	WLC_SUP_LAST_BASIC_STATE,
+
+	
+	WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+	                                
+	WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+	                                
+	WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+	                                
+	WLC_SUP_KEYXCHANGE_PREP_M4,		
+	WLC_SUP_KEYXCHANGE_WAIT_G1,		
+	WLC_SUP_KEYXCHANGE_PREP_G2		
+} sup_auth_status_t;
+
+
+#define	CRYPTO_ALGO_OFF			0
+#define	CRYPTO_ALGO_WEP1		1
+#define	CRYPTO_ALGO_TKIP		2
+#define	CRYPTO_ALGO_WEP128		3
+#define CRYPTO_ALGO_AES_CCM		4
+#define CRYPTO_ALGO_AES_OCB_MSDU	5
+#define CRYPTO_ALGO_AES_OCB_MPDU	6
+#define CRYPTO_ALGO_NALG		7
+#define CRYPTO_ALGO_SMS4		11
+
+#define WSEC_GEN_MIC_ERROR	0x0001
+#define WSEC_GEN_REPLAY		0x0002
+#define WSEC_GEN_ICV_ERROR	0x0004
+
+#define WL_SOFT_KEY	(1 << 0)	
+#define WL_PRIMARY_KEY	(1 << 1)	
+#define WL_KF_RES_4	(1 << 4)	
+#define WL_KF_RES_5	(1 << 5)	
+#define WL_IBSS_PEER_GROUP_KEY	(1 << 6)	
+
+typedef struct wl_wsec_key {
+	uint32		index;		
+	uint32		len;		
+	uint8		data[DOT11_MAX_KEY_SIZE];	
+	uint32		pad_1[18];
+	uint32		algo;		
+	uint32		flags;		
+	uint32		pad_2[2];
+	int		pad_3;
+	int		iv_initialized;	
+	int		pad_4;
+	
+	struct {
+		uint32	hi;		
+		uint16	lo;		
+	} rxiv;
+	uint32		pad_5[2];
+	struct ether_addr ea;		
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN	8
+#define WSEC_MAX_PSK_LEN	64
+
+
+#define WSEC_PASSPHRASE		(1<<0)
+
+
+typedef struct {
+	ushort	key_len;		
+	ushort	flags;			
+	uint8	key[WSEC_MAX_PSK_LEN];	
+} wsec_pmk_t;
+
+
+#define WEP_ENABLED		0x0001
+#define TKIP_ENABLED		0x0002
+#define AES_ENABLED		0x0004
+#define WSEC_SWFLAG		0x0008
+#define SES_OW_ENABLED		0x0040	
+#define SMS4_ENABLED		0x0100
+
+
+#define WPA_AUTH_DISABLED	0x0000	
+#define WPA_AUTH_NONE		0x0001	
+#define WPA_AUTH_UNSPECIFIED	0x0002	
+#define WPA_AUTH_PSK		0x0004	
+	
+#define WPA2_AUTH_UNSPECIFIED	0x0040	
+#define WPA2_AUTH_PSK		0x0080	
+#define BRCM_AUTH_PSK           0x0100  
+#define BRCM_AUTH_DPT		0x0200	
+#define WPA_AUTH_WAPI		0x0400	
+
+#define WPA_AUTH_PFN_ANY	0xffffffff	
+
+
+#define	MAXPMKID		16
+
+typedef struct _pmkid {
+	struct ether_addr	BSSID;
+	uint8			PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+	uint32	npmkid;
+	pmkid_t	pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+	struct ether_addr	BSSID;
+	uint8			preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+	uint32	npmkid_cand;
+	pmkid_cand_t	pmkid_cand[1];
+} pmkid_cand_list_t;
+
+
+
+
+typedef struct {
+	uint32	val;
+	struct ether_addr ea;
+} scb_val_t;
+
+
+
+typedef struct channel_info {
+	int hw_channel;
+	int target_channel;
+	int scan_channel;
+} channel_info_t;
+
+
+struct maclist {
+	uint count;			
+	struct ether_addr ea[1];	
+};
+
+
+typedef struct get_pktcnt {
+	uint rx_good_pkt;
+	uint rx_bad_pkt;
+	uint tx_good_pkt;
+	uint tx_bad_pkt;
+	uint rx_ocast_good_pkt; 
+} get_pktcnt_t;
+
+
+typedef struct wl_ioctl {
+	uint cmd;	
+	void *buf;	
+	uint len;	
+	uint8 set;	
+	uint used;	
+	uint needed;	
+} wl_ioctl_t;
+
+
+
+#define WLC_IOCTL_MAGIC		0x14e46c77
+
+
+#define WLC_IOCTL_VERSION	1
+
+#define	WLC_IOCTL_MAXLEN	8192	
+#define	WLC_IOCTL_SMLEN		256		
+#define	WLC_IOCTL_MEDLEN	1536	
+
+
+
+#define WLC_GET_MAGIC				0
+#define WLC_GET_VERSION				1
+#define WLC_UP					2
+#define WLC_DOWN				3
+#define WLC_GET_LOOP				4
+#define WLC_SET_LOOP				5
+#define WLC_DUMP				6
+#define WLC_GET_MSGLEVEL			7
+#define WLC_SET_MSGLEVEL			8
+#define WLC_GET_PROMISC				9
+#define WLC_SET_PROMISC				10
+ 
+#define WLC_GET_RATE				12
+ 
+#define WLC_GET_INSTANCE			14
+ 
+ 
+ 
+ 
+#define WLC_GET_INFRA				19
+#define WLC_SET_INFRA				20
+#define WLC_GET_AUTH				21
+#define WLC_SET_AUTH				22
+#define WLC_GET_BSSID				23
+#define WLC_SET_BSSID				24
+#define WLC_GET_SSID				25
+#define WLC_SET_SSID				26
+#define WLC_RESTART				27
+ 
+#define WLC_GET_CHANNEL				29
+#define WLC_SET_CHANNEL				30
+#define WLC_GET_SRL				31
+#define WLC_SET_SRL				32
+#define WLC_GET_LRL				33
+#define WLC_SET_LRL				34
+#define WLC_GET_PLCPHDR				35
+#define WLC_SET_PLCPHDR				36
+#define WLC_GET_RADIO				37
+#define WLC_SET_RADIO				38
+#define WLC_GET_PHYTYPE				39
+#define WLC_DUMP_RATE				40
+#define WLC_SET_RATE_PARAMS			41
+ 
+ 
+#define WLC_GET_KEY				44
+#define WLC_SET_KEY				45
+#define WLC_GET_REGULATORY			46
+#define WLC_SET_REGULATORY			47
+#define WLC_GET_PASSIVE_SCAN			48
+#define WLC_SET_PASSIVE_SCAN			49
+#define WLC_SCAN				50
+#define WLC_SCAN_RESULTS			51
+#define WLC_DISASSOC				52
+#define WLC_REASSOC				53
+#define WLC_GET_ROAM_TRIGGER			54
+#define WLC_SET_ROAM_TRIGGER			55
+#define WLC_GET_ROAM_DELTA			56
+#define WLC_SET_ROAM_DELTA			57
+#define WLC_GET_ROAM_SCAN_PERIOD		58
+#define WLC_SET_ROAM_SCAN_PERIOD		59
+#define WLC_EVM					60	
+#define WLC_GET_TXANT				61
+#define WLC_SET_TXANT				62
+#define WLC_GET_ANTDIV				63
+#define WLC_SET_ANTDIV				64
+ 
+ 
+#define WLC_GET_CLOSED				67
+#define WLC_SET_CLOSED				68
+#define WLC_GET_MACLIST				69
+#define WLC_SET_MACLIST				70
+#define WLC_GET_RATESET				71
+#define WLC_SET_RATESET				72
+ 
+#define WLC_LONGTRAIN				74
+#define WLC_GET_BCNPRD				75
+#define WLC_SET_BCNPRD				76
+#define WLC_GET_DTIMPRD				77
+#define WLC_SET_DTIMPRD				78
+#define WLC_GET_SROM				79
+#define WLC_SET_SROM				80
+#define WLC_GET_WEP_RESTRICT			81
+#define WLC_SET_WEP_RESTRICT			82
+#define WLC_GET_COUNTRY				83
+#define WLC_SET_COUNTRY				84
+#define WLC_GET_PM				85
+#define WLC_SET_PM				86
+#define WLC_GET_WAKE				87
+#define WLC_SET_WAKE				88
+ 
+#define WLC_GET_FORCELINK			90	
+#define WLC_SET_FORCELINK			91	
+#define WLC_FREQ_ACCURACY			92	
+#define WLC_CARRIER_SUPPRESS			93	
+#define WLC_GET_PHYREG				94
+#define WLC_SET_PHYREG				95
+#define WLC_GET_RADIOREG			96
+#define WLC_SET_RADIOREG			97
+#define WLC_GET_REVINFO				98
+#define WLC_GET_UCANTDIV			99
+#define WLC_SET_UCANTDIV			100
+#define WLC_R_REG				101
+#define WLC_W_REG				102
+
+ 
+#define WLC_GET_MACMODE				105
+#define WLC_SET_MACMODE				106
+#define WLC_GET_MONITOR				107
+#define WLC_SET_MONITOR				108
+#define WLC_GET_GMODE				109
+#define WLC_SET_GMODE				110
+#define WLC_GET_LEGACY_ERP			111
+#define WLC_SET_LEGACY_ERP			112
+#define WLC_GET_RX_ANT				113
+#define WLC_GET_CURR_RATESET			114	
+#define WLC_GET_SCANSUPPRESS			115
+#define WLC_SET_SCANSUPPRESS			116
+#define WLC_GET_AP				117
+#define WLC_SET_AP				118
+#define WLC_GET_EAP_RESTRICT			119
+#define WLC_SET_EAP_RESTRICT			120
+#define WLC_SCB_AUTHORIZE			121
+#define WLC_SCB_DEAUTHORIZE			122
+#define WLC_GET_WDSLIST				123
+#define WLC_SET_WDSLIST				124
+#define WLC_GET_ATIM				125
+#define WLC_SET_ATIM				126
+#define WLC_GET_RSSI				127
+#define WLC_GET_PHYANTDIV			128
+#define WLC_SET_PHYANTDIV			129
+#define WLC_AP_RX_ONLY				130
+#define WLC_GET_TX_PATH_PWR			131
+#define WLC_SET_TX_PATH_PWR			132
+#define WLC_GET_WSEC				133
+#define WLC_SET_WSEC				134
+#define WLC_GET_PHY_NOISE			135
+#define WLC_GET_BSS_INFO			136
+#define WLC_GET_PKTCNTS				137
+#define WLC_GET_LAZYWDS				138
+#define WLC_SET_LAZYWDS				139
+#define WLC_GET_BANDLIST			140
+#define WLC_GET_BAND				141
+#define WLC_SET_BAND				142
+#define WLC_SCB_DEAUTHENTICATE			143
+#define WLC_GET_SHORTSLOT			144
+#define WLC_GET_SHORTSLOT_OVERRIDE		145
+#define WLC_SET_SHORTSLOT_OVERRIDE		146
+#define WLC_GET_SHORTSLOT_RESTRICT		147
+#define WLC_SET_SHORTSLOT_RESTRICT		148
+#define WLC_GET_GMODE_PROTECTION		149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE	150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE	151
+#define WLC_UPGRADE				152
+ 
+ 
+#define WLC_GET_IGNORE_BCNS			155
+#define WLC_SET_IGNORE_BCNS			156
+#define WLC_GET_SCB_TIMEOUT			157
+#define WLC_SET_SCB_TIMEOUT			158
+#define WLC_GET_ASSOCLIST			159
+#define WLC_GET_CLK				160
+#define WLC_SET_CLK				161
+#define WLC_GET_UP				162
+#define WLC_OUT					163
+#define WLC_GET_WPA_AUTH			164
+#define WLC_SET_WPA_AUTH			165
+#define WLC_GET_UCFLAGS				166
+#define WLC_SET_UCFLAGS				167
+#define WLC_GET_PWRIDX				168
+#define WLC_SET_PWRIDX				169
+#define WLC_GET_TSSI				170
+#define WLC_GET_SUP_RATESET_OVERRIDE		171
+#define WLC_SET_SUP_RATESET_OVERRIDE		172
+ 
+ 
+ 
+ 
+ 
+#define WLC_GET_PROTECTION_CONTROL		178
+#define WLC_SET_PROTECTION_CONTROL		179
+#define WLC_GET_PHYLIST				180
+#define WLC_ENCRYPT_STRENGTH			181	
+#define WLC_DECRYPT_STATUS			182	
+#define WLC_GET_KEY_SEQ				183
+#define WLC_GET_SCAN_CHANNEL_TIME		184
+#define WLC_SET_SCAN_CHANNEL_TIME		185
+#define WLC_GET_SCAN_UNASSOC_TIME		186
+#define WLC_SET_SCAN_UNASSOC_TIME		187
+#define WLC_GET_SCAN_HOME_TIME			188
+#define WLC_SET_SCAN_HOME_TIME			189
+#define WLC_GET_SCAN_NPROBES			190
+#define WLC_SET_SCAN_NPROBES			191
+#define WLC_GET_PRB_RESP_TIMEOUT		192
+#define WLC_SET_PRB_RESP_TIMEOUT		193
+#define WLC_GET_ATTEN				194
+#define WLC_SET_ATTEN				195
+#define WLC_GET_SHMEM				196	
+#define WLC_SET_SHMEM				197	
+ 
+ 
+#define WLC_SET_WSEC_TEST			200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON	201
+#define WLC_TKIP_COUNTERMEASURES		202
+#define WLC_GET_PIOMODE				203
+#define WLC_SET_PIOMODE				204
+#define WLC_SET_ASSOC_PREFER			205
+#define WLC_GET_ASSOC_PREFER			206
+#define WLC_SET_ROAM_PREFER			207
+#define WLC_GET_ROAM_PREFER			208
+#define WLC_SET_LED				209
+#define WLC_GET_LED				210
+#define WLC_GET_INTERFERENCE_MODE		211
+#define WLC_SET_INTERFERENCE_MODE		212
+#define WLC_GET_CHANNEL_QA			213
+#define WLC_START_CHANNEL_QA			214
+#define WLC_GET_CHANNEL_SEL			215
+#define WLC_START_CHANNEL_SEL			216
+#define WLC_GET_VALID_CHANNELS			217
+#define WLC_GET_FAKEFRAG			218
+#define WLC_SET_FAKEFRAG			219
+#define WLC_GET_PWROUT_PERCENTAGE		220
+#define WLC_SET_PWROUT_PERCENTAGE		221
+#define WLC_SET_BAD_FRAME_PREEMPT		222
+#define WLC_GET_BAD_FRAME_PREEMPT		223
+#define WLC_SET_LEAP_LIST			224
+#define WLC_GET_LEAP_LIST			225
+#define WLC_GET_CWMIN				226
+#define WLC_SET_CWMIN				227
+#define WLC_GET_CWMAX				228
+#define WLC_SET_CWMAX				229
+#define WLC_GET_WET				230
+#define WLC_SET_WET				231
+#define WLC_GET_PUB				232
+ 
+ 
+#define WLC_GET_KEY_PRIMARY			235
+#define WLC_SET_KEY_PRIMARY			236
+ 
+#define WLC_GET_ACI_ARGS			238
+#define WLC_SET_ACI_ARGS			239
+#define WLC_UNSET_CALLBACK			240
+#define WLC_SET_CALLBACK			241
+#define WLC_GET_RADAR				242
+#define WLC_SET_RADAR				243
+#define WLC_SET_SPECT_MANAGMENT			244
+#define WLC_GET_SPECT_MANAGMENT			245
+#define WLC_WDS_GET_REMOTE_HWADDR		246	
+#define WLC_WDS_GET_WPA_SUP			247
+#define WLC_SET_CS_SCAN_TIMER			248
+#define WLC_GET_CS_SCAN_TIMER			249
+#define WLC_MEASURE_REQUEST			250
+#define WLC_INIT				251
+#define WLC_SEND_QUIET				252
+#define WLC_KEEPALIVE			253
+#define WLC_SEND_PWR_CONSTRAINT			254
+#define WLC_UPGRADE_STATUS			255
+#define WLC_CURRENT_PWR				256
+#define WLC_GET_SCAN_PASSIVE_TIME		257
+#define WLC_SET_SCAN_PASSIVE_TIME		258
+#define WLC_LEGACY_LINK_BEHAVIOR		259
+#define WLC_GET_CHANNELS_IN_COUNTRY		260
+#define WLC_GET_COUNTRY_LIST			261
+#define WLC_GET_VAR				262	
+#define WLC_SET_VAR				263	
+#define WLC_NVRAM_GET				264	
+#define WLC_NVRAM_SET				265
+#define WLC_NVRAM_DUMP				266
+#define WLC_REBOOT				267
+#define WLC_SET_WSEC_PMK			268
+#define WLC_GET_AUTH_MODE			269
+#define WLC_SET_AUTH_MODE			270
+#define WLC_GET_WAKEENTRY			271
+#define WLC_SET_WAKEENTRY			272
+#define WLC_NDCONFIG_ITEM			273	
+#define WLC_NVOTPW				274
+#define WLC_OTPW				275
+#define WLC_IOV_BLOCK_GET			276
+#define WLC_IOV_MODULES_GET			277
+#define WLC_SOFT_RESET				278
+#define WLC_GET_ALLOW_MODE			279
+#define WLC_SET_ALLOW_MODE			280
+#define WLC_GET_DESIRED_BSSID			281
+#define WLC_SET_DESIRED_BSSID			282
+#define	WLC_DISASSOC_MYAP			283
+#define WLC_GET_NBANDS				284	
+#define WLC_GET_BANDSTATES			285	
+#define WLC_GET_WLC_BSS_INFO			286	
+#define WLC_GET_ASSOC_INFO			287	
+#define WLC_GET_OID_PHY				288	
+#define WLC_SET_OID_PHY				289	
+#define WLC_SET_ASSOC_TIME			290	
+#define WLC_GET_DESIRED_SSID			291	
+#define WLC_GET_CHANSPEC			292	
+#define WLC_GET_ASSOC_STATE			293	
+#define WLC_SET_PHY_STATE			294	
+#define WLC_GET_SCAN_PENDING			295	
+#define WLC_GET_SCANREQ_PENDING			296	
+#define WLC_GET_PREV_ROAM_REASON		297	
+#define WLC_SET_PREV_ROAM_REASON		298	
+#define WLC_GET_BANDSTATES_PI			299	
+#define WLC_GET_PHY_STATE			300	
+#define WLC_GET_BSS_WPA_RSN			301	
+#define WLC_GET_BSS_WPA2_RSN			302	
+#define WLC_GET_BSS_BCN_TS			303	
+#define WLC_GET_INT_DISASSOC			304	
+#define WLC_SET_NUM_PEERS			305     
+#define WLC_GET_NUM_BSS				306	
+#define WLC_LAST				307	
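+
+/*
+ * Illustrative sketch only (not part of the driver): WLC_GET_VAR/WLC_SET_VAR
+ * carry named "iovars" instead of numbered ioctls.  The buffer layout assumed
+ * here is the NUL-terminated variable name followed by its value; the
+ * available names are firmware dependent, and "ex_iovar" is hypothetical.
+ *
+ *	char ex_buf[WLC_IOCTL_SMLEN];
+ *	uint32 ex_val = 1;
+ *	int ex_namelen = strlen("ex_iovar") + 1;
+ *
+ *	memcpy(ex_buf, "ex_iovar", ex_namelen);
+ *	memcpy(ex_buf + ex_namelen, &ex_val, sizeof(ex_val));
+ *	// submit ex_buf with cmd = WLC_SET_VAR and len = ex_namelen + sizeof(ex_val)
+ */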
+
+
+
+#define WL_RADIO_SW_DISABLE		(1<<0)
+#define WL_RADIO_HW_DISABLE		(1<<1)
+#define WL_RADIO_MPC_DISABLE		(1<<2)
+#define WL_RADIO_COUNTRY_DISABLE	(1<<3)	
+
+
+#define WL_TXPWR_OVERRIDE	(1U<<31)
+
+#define WL_PHY_PAVARS_LEN	6	
+
+
+#define WL_DIAG_INTERRUPT			1	
+#define WL_DIAG_LOOPBACK			2	
+#define WL_DIAG_MEMORY				3	
+#define WL_DIAG_LED				4	
+#define WL_DIAG_REG				5	
+#define WL_DIAG_SROM				6	
+#define WL_DIAG_DMA				7	
+
+#define WL_DIAGERR_SUCCESS			0
+#define WL_DIAGERR_FAIL_TO_RUN			1	
+#define WL_DIAGERR_NOT_SUPPORTED		2	
+#define WL_DIAGERR_INTERRUPT_FAIL		3	
+#define WL_DIAGERR_LOOPBACK_FAIL		4	
+#define WL_DIAGERR_SROM_FAIL			5	
+#define WL_DIAGERR_SROM_BADCRC			6	
+#define WL_DIAGERR_REG_FAIL			7	
+#define WL_DIAGERR_MEMORY_FAIL			8	
+#define WL_DIAGERR_NOMEM			9	
+#define WL_DIAGERR_DMA_FAIL			10	
+
+#define WL_DIAGERR_MEMORY_TIMEOUT		11	
+#define WL_DIAGERR_MEMORY_BADPATTERN		12	
+
+
+#define	WLC_BAND_AUTO		0	
+#define	WLC_BAND_5G		1	
+#define	WLC_BAND_2G		2	
+#define	WLC_BAND_ALL		3	
+
+
+#define WL_CHAN_FREQ_RANGE_2G      0
+#define WL_CHAN_FREQ_RANGE_5GL     1
+#define WL_CHAN_FREQ_RANGE_5GM     2
+#define WL_CHAN_FREQ_RANGE_5GH     3
+
+
+#define	WLC_PHY_TYPE_A		0
+#define	WLC_PHY_TYPE_B		1
+#define	WLC_PHY_TYPE_G		2
+#define	WLC_PHY_TYPE_N		4
+#define	WLC_PHY_TYPE_LP		5
+#define	WLC_PHY_TYPE_SSN	6
+#define	WLC_PHY_TYPE_NULL	0xf
+
+
+#define WLC_MACMODE_DISABLED	0	
+#define WLC_MACMODE_DENY	1	
+#define WLC_MACMODE_ALLOW	2	
+
+
+#define GMODE_LEGACY_B		0
+#define GMODE_AUTO		1
+#define GMODE_ONLY		2
+#define GMODE_B_DEFERRED	3
+#define GMODE_PERFORMANCE	4
+#define GMODE_LRS		5
+#define GMODE_MAX		6
+
+
+#define WLC_PLCP_AUTO	-1
+#define WLC_PLCP_SHORT	0
+#define WLC_PLCP_LONG	1
+
+
+#define WLC_PROTECTION_AUTO		-1
+#define WLC_PROTECTION_OFF		0
+#define WLC_PROTECTION_ON		1
+#define WLC_PROTECTION_MMHDR_ONLY	2
+#define WLC_PROTECTION_CTS_ONLY		3
+
+
+#define WLC_PROTECTION_CTL_OFF		0
+#define WLC_PROTECTION_CTL_LOCAL	1
+#define WLC_PROTECTION_CTL_OVERLAP	2
+
+
+#define WLC_N_PROTECTION_OFF		0
+#define WLC_N_PROTECTION_OPTIONAL	1
+#define WLC_N_PROTECTION_20IN40		2
+#define WLC_N_PROTECTION_MIXEDMODE	3
+
+
+#define WLC_N_PREAMBLE_MIXEDMODE	0
+#define WLC_N_PREAMBLE_GF		1
+
+
+#define WLC_N_BW_20ALL			0
+#define WLC_N_BW_40ALL			1
+#define WLC_N_BW_20IN2G_40IN5G		2
+
+
+#define WLC_N_TXRX_CHAIN0		0
+#define WLC_N_TXRX_CHAIN1		1
+
+
+#define WLC_N_SGI_20			0x01
+#define WLC_N_SGI_40			0x02
+
+
+#define PM_OFF	0
+#define PM_MAX	1
+#define PM_FAST 2
+
+#define LISTEN_INTERVAL			10
+
+#define	INTERFERE_NONE	0	
+#define	NON_WLAN	1	
+#define	WLAN_MANUAL	2	
+#define	WLAN_AUTO	3	
+#define AUTO_ACTIVE	(1 << 7) 
+
+typedef struct wl_aci_args {
+	int enter_aci_thresh; 
+	int exit_aci_thresh; 
+	int usec_spin; 
+	int glitch_delay; 
+	uint16 nphy_adcpwr_enter_thresh;	
+	uint16 nphy_adcpwr_exit_thresh;	
+	uint16 nphy_repeat_ctr;		
+	uint16 nphy_num_samples;	
+	uint16 nphy_undetect_window_sz;	
+	uint16 nphy_b_energy_lo_aci;	
+	uint16 nphy_b_energy_md_aci;	
+	uint16 nphy_b_energy_hi_aci;	
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH	16	
+
+
+
+#define WL_ERROR_VAL		0x00000001
+#define WL_TRACE_VAL		0x00000002
+#define WL_PRHDRS_VAL		0x00000004
+#define WL_PRPKT_VAL		0x00000008
+#define WL_INFORM_VAL		0x00000010
+#define WL_TMP_VAL		0x00000020
+#define WL_OID_VAL		0x00000040
+#define WL_RATE_VAL		0x00000080
+#define WL_ASSOC_VAL		0x00000100
+#define WL_PRUSR_VAL		0x00000200
+#define WL_PS_VAL		0x00000400
+#define WL_TXPWR_VAL		0x00000800
+#define WL_PORT_VAL		0x00001000
+#define WL_DUAL_VAL		0x00002000
+#define WL_WSEC_VAL		0x00004000
+#define WL_WSEC_DUMP_VAL	0x00008000
+#define WL_LOG_VAL		0x00010000
+#define WL_NRSSI_VAL		0x00020000
+#define WL_LOFT_VAL		0x00040000
+#define WL_REGULATORY_VAL	0x00080000
+#define WL_PHYCAL_VAL		0x00100000
+#define WL_RADAR_VAL		0x00200000
+#define WL_MPC_VAL		0x00400000
+#define WL_APSTA_VAL		0x00800000
+#define WL_DFS_VAL		0x01000000
+#define WL_BA_VAL		0x02000000
+#define WL_MBSS_VAL		0x04000000
+#define WL_CAC_VAL		0x08000000
+#define WL_AMSDU_VAL		0x10000000
+#define WL_AMPDU_VAL		0x20000000
+#define WL_FFPLD_VAL		0x40000000
+
+
+#define WL_DPT_VAL 		0x00000001
+#define WL_SCAN_VAL		0x00000002
+#define WL_WOWL_VAL		0x00000004
+#define WL_COEX_VAL		0x00000008
+#define WL_RTDC_VAL		0x00000010
+#define WL_BTA_VAL		0x00000040
+
+
+#define	WL_LED_NUMGPIO		16	
+
+
+#define	WL_LED_OFF		0		
+#define	WL_LED_ON		1		
+#define	WL_LED_ACTIVITY		2		
+#define	WL_LED_RADIO		3		
+#define	WL_LED_ARADIO		4		
+#define	WL_LED_BRADIO		5		
+#define	WL_LED_BGMODE		6		
+#define	WL_LED_WI1		7
+#define	WL_LED_WI2		8
+#define	WL_LED_WI3		9
+#define	WL_LED_ASSOC		10		
+#define	WL_LED_INACTIVE		11		
+#define	WL_LED_ASSOCACT		12		
+#define	WL_LED_NUMBEHAVIOR	13
+
+
+#define	WL_LED_BEH_MASK		0x7f		
+#define	WL_LED_AL_MASK		0x80		
+
+
+#define WL_NUMCHANNELS		64
+#define WL_NUMCHANSPECS		100
+
+
+#define WL_WDS_WPA_ROLE_AUTH	0	
+#define WL_WDS_WPA_ROLE_SUP	1	
+#define WL_WDS_WPA_ROLE_AUTO	255	
+
+
+#define WL_EVENTING_MASK_LEN	16
+
+
+#define VNDR_IE_CMD_LEN		4	
+
+
+#define VNDR_IE_BEACON_FLAG	0x1
+#define VNDR_IE_PRBRSP_FLAG	0x2
+#define VNDR_IE_ASSOCRSP_FLAG	0x4
+#define VNDR_IE_AUTHRSP_FLAG	0x8
+#define VNDR_IE_PRBREQ_FLAG	0x10
+#define VNDR_IE_ASSOCREQ_FLAG	0x20
+#define VNDR_IE_CUSTOM_FLAG		0x100 
+
+#define VNDR_IE_INFO_HDR_LEN	(sizeof(uint32))
+
+typedef struct {
+	uint32 pktflag;			
+	vndr_ie_t vndr_ie_data;		
+} vndr_ie_info_t;
+
+typedef struct {
+	int iecount;			
+	vndr_ie_info_t vndr_ie_list[1];	
+} vndr_ie_buf_t;
+
+typedef struct {
+	char cmd[VNDR_IE_CMD_LEN];	
+	vndr_ie_buf_t vndr_ie_buffer;	
+} vndr_ie_setbuf_t;
+
+
+
+
+#define WL_JOIN_PREF_RSSI	1	
+#define WL_JOIN_PREF_WPA	2	
+#define WL_JOIN_PREF_BAND	3	
+
+
+#define WLJP_BAND_ASSOC_PREF	255	
+
+
+#define WL_WPA_ACP_MCS_ANY	"\x00\x00\x00\x00"
+
+struct tsinfo_arg {
+	uint8 octets[3];
+};
+
+
+#define	NFIFO			6	
+
+#define	WL_CNT_T_VERSION	5	
+#define	WL_CNT_EXT_T_VERSION	1
+
+typedef struct {
+	uint16	version;	
+	uint16	length;		
+
+	
+	uint32	txframe;	
+	uint32	txbyte;		
+	uint32	txretrans;	
+	uint32	txerror;	
+	uint32	txctl;		
+	uint32	txprshort;	
+	uint32	txserr;		
+	uint32	txnobuf;	
+	uint32	txnoassoc;	
+	uint32	txrunt;		
+	uint32	txchit;		
+	uint32	txcmiss;	
+
+	
+	uint32	txuflo;		
+	uint32	txphyerr;	
+	uint32	txphycrs;
+
+	
+	uint32	rxframe;	
+	uint32	rxbyte;		
+	uint32	rxerror;	
+	uint32	rxctl;		
+	uint32	rxnobuf;	
+	uint32	rxnondata;	
+	uint32	rxbadds;	
+	uint32	rxbadcm;	
+	uint32	rxfragerr;	
+	uint32	rxrunt;		
+	uint32	rxgiant;	
+	uint32	rxnoscb;	
+	uint32	rxbadproto;	
+	uint32	rxbadsrcmac;	
+	uint32	rxbadda;	
+	uint32	rxfilter;	
+
+	
+	uint32	rxoflo;		
+	uint32	rxuflo[NFIFO];	
+
+	uint32	d11cnt_txrts_off;	
+	uint32	d11cnt_rxcrc_off;	
+	uint32	d11cnt_txnocts_off;	
+
+	
+	uint32	dmade;		
+	uint32	dmada;		
+	uint32	dmape;		
+	uint32	reset;		
+	uint32	tbtt;		
+	uint32	txdmawar;
+	uint32	pkt_callback_reg_fail;	
+
+	
+	uint32	txallfrm;	
+	uint32	txrtsfrm;	
+	uint32	txctsfrm;	
+	uint32	txackfrm;	
+	uint32	txdnlfrm;	
+	uint32	txbcnfrm;	
+	uint32	txfunfl[8];	
+	uint32	txtplunfl;	
+	uint32	txphyerror;	
+	uint32	rxfrmtoolong;	
+	uint32	rxfrmtooshrt;	
+	uint32	rxinvmachdr;	
+	uint32	rxbadfcs;	
+	uint32	rxbadplcp;	
+	uint32	rxcrsglitch;	
+	uint32	rxstrt;		
+	uint32	rxdfrmucastmbss; 
+	uint32	rxmfrmucastmbss; 
+	uint32	rxcfrmucast;	
+	uint32	rxrtsucast;	
+	uint32	rxctsucast;	
+	uint32	rxackucast;	
+	uint32	rxdfrmocast;	
+	uint32	rxmfrmocast;	
+	uint32	rxcfrmocast;	
+	uint32	rxrtsocast;	
+	uint32	rxctsocast;	
+	uint32	rxdfrmmcast;	
+	uint32	rxmfrmmcast;	
+	uint32	rxcfrmmcast;	
+	uint32	rxbeaconmbss;	
+	uint32	rxdfrmucastobss; 
+	uint32	rxbeaconobss;	
+	uint32	rxrsptmout;	
+	uint32	bcntxcancl;	
+	uint32	rxf0ovfl;	
+	uint32	rxf1ovfl;	
+	uint32	rxf2ovfl;	
+	uint32	txsfovfl;	
+	uint32	pmqovfl;	
+	uint32	rxcgprqfrm;	
+	uint32	rxcgprsqovfl;	
+	uint32	txcgprsfail;	
+	uint32	txcgprssuc;	
+	uint32	prs_timeout;	
+	uint32	rxnack;
+	uint32	frmscons;
+	uint32	txnack;
+	uint32	txglitch_nack;	
+	uint32	txburst;	
+
+	
+	uint32	txfrag;		
+	uint32	txmulti;	
+	uint32	txfail;		
+	uint32	txretry;	
+	uint32	txretrie;	
+	uint32	rxdup;		
+	uint32	txrts;		
+	uint32	txnocts;	
+	uint32	txnoack;	
+	uint32	rxfrag;		
+	uint32	rxmulti;	
+	uint32	rxcrc;		
+	uint32	txfrmsnt;	
+	uint32	rxundec;	
+
+	
+	uint32	tkipmicfaill;	
+	uint32	tkipcntrmsr;	
+	uint32	tkipreplay;	
+	uint32	ccmpfmterr;	
+	uint32	ccmpreplay;	
+	uint32	ccmpundec;	
+	uint32	fourwayfail;	
+	uint32	wepundec;	
+	uint32	wepicverr;	
+	uint32	decsuccess;	
+	uint32	tkipicverr;	
+	uint32	wepexcluded;	
+
+	uint32	txchanrej;	
+	uint32	psmwds;		
+	uint32	phywatchdog;	
+
+	
+	uint32	prq_entries_handled;	
+	uint32	prq_undirected_entries;	
+	uint32	prq_bad_entries;	
+	uint32	atim_suppress_count;	
+	uint32	bcn_template_not_ready;	
+	uint32	bcn_template_not_ready_done; 
+	uint32	late_tbtt_dpc;	
+
+	
+	uint32  rx1mbps;	
+	uint32  rx2mbps;	
+	uint32  rx5mbps5;	
+	uint32  rx6mbps;	
+	uint32  rx9mbps;	
+	uint32  rx11mbps;	
+	uint32  rx12mbps;	
+	uint32  rx18mbps;	
+	uint32  rx24mbps;	
+	uint32  rx36mbps;	
+	uint32  rx48mbps;	
+	uint32  rx54mbps;	
+	uint32  rx108mbps; 	
+	uint32  rx162mbps;	
+	uint32  rx216mbps;	
+	uint32  rx270mbps;	
+	uint32  rx324mbps;	
+	uint32  rx378mbps;	
+	uint32  rx432mbps;	
+	uint32  rx486mbps;	
+	uint32  rx540mbps;	
+	
+	uint32	pktengrxducast; 
+	uint32	pktengrxdmcast; 
+} wl_cnt_t;
+
+typedef struct {
+	uint16	version;	
+	uint16	length;		
+
+	uint32 rxampdu_sgi;	
+	uint32 rxampdu_stbc; 
+	uint32 rxmpdu_sgi;	
+	uint32 rxmpdu_stbc;  
+	uint32	rxmcs0_40M;  
+	uint32	rxmcs1_40M;  
+	uint32	rxmcs2_40M;  
+	uint32	rxmcs3_40M;  
+	uint32	rxmcs4_40M;  
+	uint32	rxmcs5_40M;  
+	uint32	rxmcs6_40M;  
+	uint32	rxmcs7_40M;  
+	uint32	rxmcs32_40M;  
+
+	uint32	txfrmsnt_20Mlo;  
+	uint32	txfrmsnt_20Mup;  
+	uint32	txfrmsnt_40M;   
+
+	uint32 rx_20ul;
+} wl_cnt_ext_t;
+
+#define	WL_RXDIV_STATS_T_VERSION	1	
+typedef struct {
+	uint16	version;	
+	uint16	length;		
+
+	uint32 rxant[4];	
+} wl_rxdiv_stats_t;
+
+#define	WL_DELTA_STATS_T_VERSION	1	
+
+typedef struct {
+	uint16 version;     
+	uint16 length;      
+
+	
+	uint32 txframe;     
+	uint32 txbyte;      
+	uint32 txretrans;   
+	uint32 txfail;      
+
+	
+	uint32 rxframe;     
+	uint32 rxbyte;      
+
+	
+	uint32  rx1mbps;	
+	uint32  rx2mbps;	
+	uint32  rx5mbps5;	
+	uint32  rx6mbps;	
+	uint32  rx9mbps;	
+	uint32  rx11mbps;	
+	uint32  rx12mbps;	
+	uint32  rx18mbps;	
+	uint32  rx24mbps;	
+	uint32  rx36mbps;	
+	uint32  rx48mbps;	
+	uint32  rx54mbps;	
+	uint32  rx108mbps; 	
+	uint32  rx162mbps;	
+	uint32  rx216mbps;	
+	uint32  rx270mbps;	
+	uint32  rx324mbps;	
+	uint32  rx378mbps;	
+	uint32  rx432mbps;	
+	uint32  rx486mbps;	
+	uint32  rx540mbps;	
+} wl_delta_stats_t;
+
+#define WL_WME_CNT_VERSION	1	
+
+typedef struct {
+	uint32 packets;
+	uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+	uint16	version;	
+	uint16	length;		
+
+	wl_traffic_stats_t tx[AC_COUNT];	
+	wl_traffic_stats_t tx_failed[AC_COUNT];	
+	wl_traffic_stats_t rx[AC_COUNT];	
+	wl_traffic_stats_t rx_failed[AC_COUNT];	
+
+	wl_traffic_stats_t forward[AC_COUNT];	
+
+	wl_traffic_stats_t tx_expired[AC_COUNT];	
+
+} wl_wme_cnt_t;
+
+
+
+#define WLC_ROAM_TRIGGER_DEFAULT	0 
+#define WLC_ROAM_TRIGGER_BANDWIDTH	1 
+#define WLC_ROAM_TRIGGER_DISTANCE	2 
+#define WLC_ROAM_TRIGGER_MAX_VALUE	2 
+
+
+enum {
+	PFN_LIST_ORDER,
+	PFN_RSSI
+};
+
+enum {
+	DISABLE,
+	ENABLE
+};
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT	2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT	6
+
+#define SORT_CRITERIA_MASK		0x01
+#define AUTO_NET_SWITCH_MASK	0x02
+#define ENABLE_BKGRD_SCAN_MASK	0x04
+#define IMMEDIATE_SCAN_MASK		0x08
+#define	AUTO_CONNECT_MASK		0x10
+#define ENABLE_BD_SCAN_MASK		0x20
+#define ENABLE_ADAPTSCAN_MASK	0x40
+
+#define PFN_VERSION			1
+
+#define MAX_PFN_LIST_COUNT	16
+
+
+typedef struct wl_pfn_param {
+	int32 version;			
+	int32 scan_freq;		
+	int32 lost_network_timeout;	
+	int16 flags;			
+	int16 rssi_margin;		
+	int32  repeat_scan;
+	int32  max_freq_adjust;
+} wl_pfn_param_t;
+
+typedef struct wl_pfn {
+	wlc_ssid_t		ssid;			
+	int32			bss_type;		
+	int32			infra;			
+	int32			auth;			
+	uint32			wpa_auth;		
+	int32			wsec;			
+} wl_pfn_t;
+
+#define PNO_SCAN_MAX_FW		508*1000
+#define PNO_SCAN_MAX_FW_SEC	PNO_SCAN_MAX_FW/1000
+#define PNO_SCAN_MIN_FW_SEC	10
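+
+/*
+ * Illustrative sketch only (not part of the driver): preferred-network-offload
+ * scanning is configured with one wl_pfn_param_t for the scan cadence and up
+ * to MAX_PFN_LIST_COUNT wl_pfn_t entries, one per network.  The values below
+ * are arbitrary examples and "ex-network" is hypothetical.
+ *
+ *	wl_pfn_param_t ex_param;
+ *	wl_pfn_t ex_net;
+ *
+ *	memset(&ex_param, 0, sizeof(ex_param));
+ *	ex_param.version = PFN_VERSION;
+ *	ex_param.scan_freq = 60;
+ *	ex_param.flags = ENABLE << IMMEDIATE_SCAN_BIT;
+ *
+ *	memset(&ex_net, 0, sizeof(ex_net));
+ *	ex_net.ssid.SSID_len = strlen("ex-network");
+ *	memcpy(ex_net.ssid.SSID, "ex-network", ex_net.ssid.SSID_len);
+ */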
+
+
+#define TOE_TX_CSUM_OL		0x00000001
+#define TOE_RX_CSUM_OL		0x00000002
+
+
+#define TOE_ERRTEST_TX_CSUM	0x00000001
+#define TOE_ERRTEST_RX_CSUM	0x00000002
+#define TOE_ERRTEST_RX_CSUM2	0x00000004
+
+struct toe_ol_stats_t {
+	
+	uint32 tx_summed;
+
+	
+	uint32 tx_iph_fill;
+	uint32 tx_tcp_fill;
+	uint32 tx_udp_fill;
+	uint32 tx_icmp_fill;
+
+	
+	uint32 rx_iph_good;
+	uint32 rx_iph_bad;
+	uint32 rx_tcp_good;
+	uint32 rx_tcp_bad;
+	uint32 rx_udp_good;
+	uint32 rx_udp_bad;
+	uint32 rx_icmp_good;
+	uint32 rx_icmp_bad;
+
+	
+	uint32 tx_tcp_errinj;
+	uint32 tx_udp_errinj;
+	uint32 tx_icmp_errinj;
+
+	
+	uint32 rx_tcp_errinj;
+	uint32 rx_udp_errinj;
+	uint32 rx_icmp_errinj;
+};
+
+
+#define ARP_OL_AGENT		0x00000001
+#define ARP_OL_SNOOP		0x00000002
+#define ARP_OL_HOST_AUTO_REPLY	0x00000004
+#define ARP_OL_PEER_AUTO_REPLY	0x00000008
+
+
+#define ARP_ERRTEST_REPLY_PEER	0x1
+#define ARP_ERRTEST_REPLY_HOST	0x2
+
+#define ARP_MULTIHOMING_MAX	8	
+
+
+struct arp_ol_stats_t {
+	uint32  host_ip_entries;	
+	uint32  host_ip_overflow;	
+
+	uint32  arp_table_entries;	
+	uint32  arp_table_overflow;	
+
+	uint32  host_request;		
+	uint32  host_reply;		
+	uint32  host_service;		
+
+	uint32  peer_request;		
+	uint32  peer_request_drop;	
+	uint32  peer_reply;		
+	uint32  peer_reply_drop;	
+	uint32  peer_service;		
+};
+
+
+
+
+
+typedef struct wl_keep_alive_pkt {
+	uint32	period_msec;	
+	uint16	len_bytes;	
+	uint8	data[1];	
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN		OFFSETOF(wl_keep_alive_pkt_t, data)
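+
+/*
+ * Illustrative sketch only (not part of the driver): a keep-alive request is
+ * the fixed header followed by the raw packet bytes, so the buffer is sized
+ * as WL_KEEP_ALIVE_FIXED_LEN + len_bytes.  Identifiers prefixed ex_ are
+ * hypothetical.
+ *
+ *	uint8 ex_pkt[] = { 0x01, 0x02, 0x03 };	// placeholder payload
+ *	int ex_len = WL_KEEP_ALIVE_FIXED_LEN + sizeof(ex_pkt);
+ *	wl_keep_alive_pkt_t *ex_ka = (wl_keep_alive_pkt_t *)malloc(ex_len);
+ *
+ *	ex_ka->period_msec = 30000;
+ *	ex_ka->len_bytes = sizeof(ex_pkt);
+ *	memcpy(ex_ka->data, ex_pkt, sizeof(ex_pkt));
+ */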
+
+
+
+
+
+typedef enum wl_pkt_filter_type {
+	WL_PKT_FILTER_TYPE_PATTERN_MATCH	
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+
+typedef struct wl_pkt_filter_pattern {
+	uint32	offset;		
+	uint32	size_bytes;	
+	uint8   mask_and_pattern[1]; 
+} wl_pkt_filter_pattern_t;
+
+
+typedef struct wl_pkt_filter {
+	uint32	id;		
+	uint32	type;		
+	uint32	negate_match;	
+	union {			
+		wl_pkt_filter_pattern_t pattern;	
+	} u;
+} wl_pkt_filter_t;
+
+#define WL_PKT_FILTER_FIXED_LEN		  OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN	  OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
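+
+/*
+ * Illustrative sketch only (not part of the driver): for a pattern-match
+ * filter, mask_and_pattern[] is assumed to hold size_bytes of mask followed
+ * by size_bytes of pattern, so the full request occupies
+ * WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * size_bytes.
+ * Identifiers prefixed ex_ are hypothetical.
+ *
+ *	uint32 ex_size = 6;	// e.g. match on a 6-byte destination address
+ *	uint32 ex_len = WL_PKT_FILTER_FIXED_LEN +
+ *		WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * ex_size;
+ *	wl_pkt_filter_t *ex_f = (wl_pkt_filter_t *)malloc(ex_len);
+ *
+ *	ex_f->id = 100;
+ *	ex_f->type = WL_PKT_FILTER_TYPE_PATTERN_MATCH;
+ *	ex_f->negate_match = 0;
+ *	ex_f->u.pattern.offset = 0;
+ *	ex_f->u.pattern.size_bytes = ex_size;
+ *	// fill ex_f->u.pattern.mask_and_pattern[0 .. 2 * ex_size - 1]
+ */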
+
+
+typedef struct wl_pkt_filter_enable {
+	uint32	id;		
+	uint32	enable;		
+} wl_pkt_filter_enable_t;
+
+
+typedef struct wl_pkt_filter_list {
+	uint32	num;		
+	wl_pkt_filter_t	filter[1];	
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN	  OFFSETOF(wl_pkt_filter_list_t, filter)
+
+
+typedef struct wl_pkt_filter_stats {
+	uint32	num_pkts_matched;	
+	uint32	num_pkts_forwarded;	
+	uint32	num_pkts_discarded;	
+} wl_pkt_filter_stats_t;
+
+
+typedef struct wl_seq_cmd_ioctl {
+	uint32 cmd;		
+	uint32 len;		
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES	4
+
+
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+	(((cmd) == WLC_GET_MAGIC)		|| \
+	 ((cmd) == WLC_GET_VERSION)		|| \
+	 ((cmd) == WLC_GET_AP)			|| \
+	 ((cmd) == WLC_GET_INSTANCE))
+
+
+
+#define WL_PKTENG_PER_TX_START			0x01
+#define WL_PKTENG_PER_TX_STOP			0x02
+#define WL_PKTENG_PER_RX_START			0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START 	0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START 	0x06
+#define WL_PKTENG_PER_RX_STOP			0x08
+#define WL_PKTENG_PER_MASK			0xff
+
+#define WL_PKTENG_SYNCHRONOUS			0x100	
+
+typedef struct wl_pkteng {
+	uint32 flags;
+	uint32 delay;			
+	uint32 nframes;			
+	uint32 length;			
+	uint8  seqno;			
+	struct ether_addr dest;		
+	struct ether_addr src;		
+} wl_pkteng_t;
+
+#define NUM_80211b_RATES	4
+#define NUM_80211ag_RATES	8
+#define NUM_80211n_RATES	32
+#define NUM_80211_RATES		(NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+typedef struct wl_pkteng_stats {
+	uint32 lostfrmcnt;		
+	int32 rssi;			
+	int32 snr;			
+	uint16 rxpktcnt[NUM_80211_RATES+1];
+} wl_pkteng_stats_t;
+
+#define WL_WOWL_MAGIC	(1 << 0)	
+#define WL_WOWL_NET	(1 << 1)	
+#define WL_WOWL_DIS	(1 << 2)	
+#define WL_WOWL_RETR	(1 << 3)	
+#define WL_WOWL_BCN	(1 << 4)	
+#define WL_WOWL_TST	(1 << 5)	
+#define WL_WOWL_BCAST	(1 << 15)	
+
+#define MAGIC_PKT_MINLEN 102	
+
+typedef struct {
+	uint masksize;		
+	uint offset;		
+	uint patternoffset;	
+	uint patternsize;	
+	
+	
+} wl_wowl_pattern_t;
+
+typedef struct {
+	uint			count;
+	wl_wowl_pattern_t	pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct {
+	uint8	pci_wakeind;	
+	uint16	ucode_wakeind;	
+} wl_wowl_wakeind_t;
+
+
+typedef struct wl_txrate_class {
+	uint8		init_rate;
+	uint8		min_rate;
+	uint8		max_rate;
+} wl_txrate_class_t;
+
+
+
+
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT		100	
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN			5	
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX			1000	
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT		20	
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN			10	
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX			1000	
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT	300	
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN		10	
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX		900	
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX	100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT	200	
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN	200	
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX	10000	
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT	20	
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN	20	
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX	10000	
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT	25	
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN		0	
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX		100	
+
+
+typedef struct wl_obss_scan_arg {
+	int16	passive_dwell;
+	int16	active_dwell;
+	int16	bss_widthscan_interval;
+	int16	passive_total;
+	int16	active_total;
+	int16	chanwidth_transition_delay;
+	int16	activity_threshold;
+} wl_obss_scan_arg_t;
+#define WL_OBSS_SCAN_PARAM_LEN	sizeof(wl_obss_scan_arg_t)
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7	
+
+#define WL_COEX_INFO_MASK		0x07
+#define WL_COEX_INFO_REQ		0x01
+#define	WL_COEX_40MHZ_INTOLERANT	0x02
+#define	WL_COEX_WIDTH20			0x04
+
+typedef struct wl_action_obss_coex_req {
+	uint8 info;
+	uint8 num;
+	uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+#define MAX_RSSI_LEVELS 8
+
+
+typedef struct wl_rssi_event {
+	
+	uint32 rate_limit_msec;
+	
+	uint8 num_rssi_levels;
+	
+	int8 rssi_levels[MAX_RSSI_LEVELS];
+} wl_rssi_event_t;
+
+
+
+#define WLFEATURE_DISABLE_11N		0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX	0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX	0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX	0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX	0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX	0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX	0x00000040
+#define WLFEATURE_DISABLE_11N_GF	0x00000080
+
+
+
+#include <packed_section_end.h>
+
+
+#include <packed_section_start.h>
+
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+	struct ether_addr staAddr;
+	uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+	sta_prbreq_wps_ie_hdr_t hdr;
+	uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+	uint32 totLen;
+	uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#include <packed_section_end.h>
+
+#endif 
diff --git a/drivers/net/wireless/bcm4329/linux_osl.c b/drivers/net/wireless/bcm4329/linux_osl.c
new file mode 100644
index 0000000..cf72a07
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/linux_osl.c
@@ -0,0 +1,625 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c,v 1.125.12.3.8.7 2010/05/04 21:10:04 Exp $
+ */
+
+
+#define LINUX_OSL
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+#include <linux/mutex.h>
+
+#define PCI_CFG_RETRY 		10
+
+#define OS_HANDLE_MAGIC		0x1234abcd	
+#define BCM_MEM_FILENAME_LEN 	24		
+
+#ifdef DHD_USE_STATIC_BUF
+#define MAX_STATIC_BUF_NUM 16
+#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE)
+typedef struct bcm_static_buf {
+	struct mutex static_sem;
+	unsigned char *buf_ptr;
+	unsigned char buf_use[MAX_STATIC_BUF_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define MAX_STATIC_PKT_NUM 8
+typedef struct bcm_static_pkt {
+	struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM];
+	struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM];
+	struct mutex osl_pkt_sem;
+	unsigned char pkt_use[MAX_STATIC_PKT_NUM*2];
+} bcm_static_pkt_t;
+static bcm_static_pkt_t *bcm_static_skb = 0;
+
+#endif 
+typedef struct bcm_mem_link {
+	struct bcm_mem_link *prev;
+	struct bcm_mem_link *next;
+	uint	size;
+	int	line;
+	char	file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_info {
+	osl_pubinfo_t pub;
+	uint magic;
+	void *pdev;
+	uint malloced;
+	uint failed;
+	uint bustype;
+	bcm_mem_link_t *dbgmem_list;
+};
+
+static int16 linuxbcmerrormap[] =
+{	0, 			
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-E2BIG,			
+	-E2BIG,			
+	-EBUSY, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EFAULT, 		
+	-ENOMEM, 		
+	-EOPNOTSUPP,		
+	-EMSGSIZE,		
+	-EINVAL,		
+	-EPERM,			
+	-ENOMEM, 		
+	-EINVAL, 		
+	-ERANGE, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL,		
+	-EIO,			
+	-ENODEV,		
+	-EINVAL,		
+	-EIO,			
+	-EIO,			
+	-EINVAL,		
+	-EINVAL,		
+
+
+
+#if BCME_LAST != -41
+#error "You need to add an OS error translation in the linuxbcmerrormap \
+	for the new error code defined in bcmutils.h"
+#endif 
+};
+
+
+int
+osl_error(int bcmerror)
+{
+	if (bcmerror > 0)
+		bcmerror = 0;
+	else if (bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+
+	
+	return linuxbcmerrormap[-bcmerror];
+}
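+
+/*
+ * Illustrative usage sketch: a negative BCME_* code from the common layer is
+ * translated into the matching Linux errno via the table above (so, assuming
+ * BCME_ERROR is -1 as in bcmutils.h, it maps to -EINVAL).
+ *
+ *	int ex_err = osl_error(BCME_ERROR);
+ */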
+
+void * dhd_os_prealloc(int section, unsigned long size);
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+	osl_t *osh;
+	gfp_t flags;
+
+	flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	osh = kmalloc(sizeof(osl_t), flags);
+	ASSERT(osh);
+
+	bzero(osh, sizeof(osl_t));
+
+	
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+	osh->magic = OS_HANDLE_MAGIC;
+	osh->malloced = 0;
+	osh->failed = 0;
+	osh->dbgmem_list = NULL;
+	osh->pdev = pdev;
+	osh->pub.pkttag = pkttag;
+	osh->bustype = bustype;
+
+	switch (bustype) {
+		case PCI_BUS:
+		case SI_BUS:
+		case PCMCIA_BUS:
+			osh->pub.mmbus = TRUE;
+			break;
+		case JTAG_BUS:
+		case SDIO_BUS:
+		case USB_BUS:
+		case SPI_BUS:
+			osh->pub.mmbus = FALSE;
+			break;
+		default:
+			ASSERT(FALSE);
+			break;
+	}
+
+#ifdef DHD_USE_STATIC_BUF
+
+
+	if (!bcm_static_buf) {
+		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE +
+			STATIC_BUF_TOTAL_LEN))) {
+			printk("can not alloc static buf!\n");
+		} else {
+			/* only set up the pool when the preallocated region is available,
+			 * so a failed preallocation is not dereferenced below
+			 */
+			mutex_init(&bcm_static_buf->static_sem);
+			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+		}
+	}
+	
+	/* the static skb bookkeeping lives inside the region preallocated above */
+	if (bcm_static_buf && !bcm_static_skb)
+	{
+		int i;
+		void *skb_buff_ptr = 0;
+		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+		skb_buff_ptr = dhd_os_prealloc(4, 0);
+
+		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
+		for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++)
+			bcm_static_skb->pkt_use[i] = 0;
+
+		mutex_init(&bcm_static_skb->osl_pkt_sem);
+	}
+#endif 
+	return osh;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+	if (osh == NULL)
+		return;
+
+#ifdef DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif 
+	ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	kfree(osh);
+}
+
+
+void*
+osl_pktget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+	gfp_t flags;
+
+	flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	if ((skb = __dev_alloc_skb(len, flags))) {
+		skb_put(skb, len);
+		skb->priority = 0;
+
+
+		osh->pub.pktalloced++;
+	}
+
+	return ((void*) skb);
+}
+
+
+void
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+	struct sk_buff *skb, *nskb;
+
+	skb = (struct sk_buff*) p;
+
+	if (send && osh->pub.tx_fn)
+		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+	
+	while (skb) {
+		nskb = skb->next;
+		skb->next = NULL;
+
+
+		if (skb->destructor) {
+			
+			dev_kfree_skb_any(skb);
+		} else {
+			
+			dev_kfree_skb(skb);
+		}
+
+		osh->pub.pktalloced--;
+
+		skb = nskb;
+	}
+}
+
+#ifdef DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+	int i = 0;
+	struct sk_buff *skb;
+
+	
+	if (len > (PAGE_SIZE*2))
+	{
+		printk("Do we really need this big skb??\n");
+		return osl_pktget(osh, len);
+	}
+
+	
+	mutex_lock(&bcm_static_skb->osl_pkt_sem);
+	if (len <= PAGE_SIZE)
+	{
+		
+		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+		{
+			if (bcm_static_skb->pkt_use[i] == 0)
+				break;
+		}
+
+		if (i != MAX_STATIC_PKT_NUM)
+		{
+			bcm_static_skb->pkt_use[i] = 1;
+			mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+
+			skb = bcm_static_skb->skb_4k[i];
+			skb->tail = skb->data + len;
+			skb->len = len;
+			
+			return skb;
+		}
+	}
+
+	
+	for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+	{
+		if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0)
+			break;
+	}
+
+	if (i != MAX_STATIC_PKT_NUM)
+	{
+		bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1;
+		mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+		skb = bcm_static_skb->skb_8k[i];
+		skb->tail = skb->data + len;
+		skb->len = len;
+		
+		return skb;
+	}
+
+
+	
+	mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+	printk("all static pkt in use!\n");
+	return osl_pktget(osh, len);
+}
+
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+	int i;
+	
+	for (i = 0; i < MAX_STATIC_PKT_NUM*2; i++)
+	{
+		if (p == bcm_static_skb->skb_4k[i])
+		{
+			mutex_lock(&bcm_static_skb->osl_pkt_sem);
+			bcm_static_skb->pkt_use[i] = 0;
+			mutex_unlock(&bcm_static_skb->osl_pkt_sem);
+
+			
+			return;
+		}
+	}
+	return osl_pktfree(osh, p, send);
+}
+#endif 
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+	uint val = 0;
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	
+	ASSERT(size == 4);
+
+	do {
+		pci_read_config_dword(osh->pdev, offset, &val);
+		if (val != 0xffffffff)
+			break;
+	} while (retry--);
+
+
+	return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	
+	ASSERT(size == 4);
+
+	do {
+		pci_write_config_dword(osh->pdev, offset, val);
+		if (offset != PCI_BAR0_WIN)
+			break;
+		if (osl_pci_read_config(osh, offset, size) == val)
+			break;
+	} while (retry--);
+
+}
+
+
+uint
+osl_pci_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+
+uint
+osl_pci_slot(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+
+
+void*
+osl_malloc(osl_t *osh, uint size)
+{
+	void *addr;
+	gfp_t flags;
+
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+#ifdef DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		int i = 0;
+		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
+		{
+			mutex_lock(&bcm_static_buf->static_sem);
+			
+			for (i = 0; i < MAX_STATIC_BUF_NUM; i++)
+			{
+				if (bcm_static_buf->buf_use[i] == 0)
+					break;
+			}
+			
+			if (i == MAX_STATIC_BUF_NUM)
+			{
+				mutex_unlock(&bcm_static_buf->static_sem);
+				printk("all static buff in use!\n");
+				goto original;
+			}
+			
+			bcm_static_buf->buf_use[i] = 1;
+			mutex_unlock(&bcm_static_buf->static_sem);
+
+			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+			if (osh)
+				osh->malloced += size;
+
+			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+		}
+	}
+original:
+#endif 
+	flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	if ((addr = kmalloc(size, flags)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh)
+		osh->malloced += size;
+
+	return (addr);
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+		{
+			int buf_idx = 0;
+			
+			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+			
+			mutex_lock(&bcm_static_buf->static_sem);
+			bcm_static_buf->buf_use[buf_idx] = 0;
+			mutex_unlock(&bcm_static_buf->static_sem);
+
+			if (osh) {
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				osh->malloced -= size;
+			}
+			return;
+		}
+	}
+#endif 
+	if (osh) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+		osh->malloced -= size;
+	}
+	kfree(addr);
+}
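+
+/*
+ * Illustrative usage sketch: callers pass the osl handle so allocations are
+ * accounted in osh->malloced and, with DHD_USE_STATIC_BUF, suitably sized
+ * requests can be served from the preallocated pool.  ex_p is hypothetical.
+ *
+ *	void *ex_p = osl_malloc(osh, 2048);
+ *	if (ex_p != NULL) {
+ *		// ... use the buffer ...
+ *		osl_mfree(osh, ex_p, 2048);
+ *	}
+ */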
+
+uint
+osl_malloced(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (osh->malloced);
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (osh->failed);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
+}
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+}
+
+uint
+osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+
+void
+osl_delay(uint usec)
+{
+	uint d;
+
+	while (usec > 0) {
+		d = MIN(usec, 1000);
+		udelay(d);
+		usec -= d;
+	}
+}
+
+
+
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+	void * p;
+	gfp_t flags;
+
+	flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	if ((p = skb_clone((struct sk_buff*)skb, flags)) == NULL)
+		return NULL;
+
+	
+	if (osh->pub.pkttag)
+		bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+	
+	osh->pub.pktalloced++;
+	return (p);
+}
diff --git a/drivers/net/wireless/bcm4329/miniopt.c b/drivers/net/wireless/bcm4329/miniopt.c
new file mode 100644
index 0000000..6a184a7
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/miniopt.c
@@ -0,0 +1,163 @@
+/*
+ * Description.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.c,v 1.1.6.4 2009/09/25 00:32:01 Exp $
+ */
+
+/* ---- Include Files ---------------------------------------------------- */
+
+#include <typedefs.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "miniopt.h"
+
+
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+
+
+
+/* ---- Private Variables ------------------------------------------------ */
+/* ---- Private Function Prototypes -------------------------------------- */
+/* ---- Functions -------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------- */
+void
+miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags)
+{
+	static const char *null_flags = "";
+
+	memset(t, 0, sizeof(miniopt_t));
+	t->name = name;
+	if (flags == NULL)
+		t->flags = null_flags;
+	else
+		t->flags = flags;
+	t->longflags = longflags;
+}
+
+
+/* ----------------------------------------------------------------------- */
+int
+miniopt(miniopt_t *t, char **argv)
+{
+	int keylen;
+	char *p, *eq, *valstr, *endptr = NULL;
+	int err = 0;
+
+	t->consumed = 0;
+	t->positional = FALSE;
+	memset(t->key, 0, MINIOPT_MAXKEY);
+	t->opt = '\0';
+	t->valstr = NULL;
+	t->good_int = FALSE;
+	valstr = NULL;
+
+	if (*argv == NULL) {
+		err = -1;
+		goto exit;
+	}
+
+	p = *argv++;
+	t->consumed++;
+
+	if (!t->opt_end && !strcmp(p, "--")) {
+		t->opt_end = TRUE;
+		if (*argv == NULL) {
+			err = -1;
+			goto exit;
+		}
+		p = *argv++;
+		t->consumed++;
+	}
+
+	if (t->opt_end) {
+		t->positional = TRUE;
+		valstr = p;
+	}
+	else if (!strncmp(p, "--", 2)) {
+		eq = strchr(p, '=');
+		if (eq == NULL && !t->longflags) {
+			fprintf(stderr,
+				"%s: missing \" = \" in long param \"%s\"\n", t->name, p);
+			err = 1;
+			goto exit;
+		}
+		keylen = eq ? (eq - (p + 2)) : (int)strlen(p) - 2;
+		if (keylen > 63) keylen = 63;
+		memcpy(t->key, p + 2, keylen);
+
+		if (eq) {
+			valstr = eq + 1;
+			if (*valstr == '\0') {
+				fprintf(stderr,
+				        "%s: missing value after \" = \" in long param \"%s\"\n",
+				        t->name, p);
+				err = 1;
+				goto exit;
+			}
+		}
+	}
+	else if (!strncmp(p, "-", 1)) {
+		t->opt = p[1];
+		if (strlen(p) > 2) {
+			fprintf(stderr,
+				"%s: only single char options, error on param \"%s\"\n",
+				t->name, p);
+			err = 1;
+			goto exit;
+		}
+		if (strchr(t->flags, t->opt)) {
+			/* this is a flag option, no value expected */
+			valstr = NULL;
+		} else {
+			if (*argv == NULL) {
+				fprintf(stderr,
+				"%s: missing value parameter after \"%s\"\n", t->name, p);
+				err = 1;
+				goto exit;
+			}
+			valstr = *argv;
+			argv++;
+			t->consumed++;
+		}
+	} else {
+		t->positional = TRUE;
+		valstr = p;
+	}
+
+	/* parse valstr as int just in case */
+	if (valstr) {
+		t->uval = (uint)strtoul(valstr, &endptr, 0);
+		t->val = (int)t->uval;
+		t->good_int = (*endptr == '\0');
+	}
+
+	t->valstr = valstr;
+
+exit:
+	if (err == 1)
+		t->opt = '?';
+
+	return err;
+}
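+
+/*
+ * Illustrative usage sketch (not part of this file): the parser is driven in
+ * a loop, with the caller advancing argv by the number of argument strings
+ * each call consumed.  ex_parse and "ex_tool" are hypothetical.
+ *
+ *	void ex_parse(char **argv)
+ *	{
+ *		miniopt_t ex_mo;
+ *
+ *		miniopt_init(&ex_mo, "ex_tool", "v", FALSE);	// -v takes no value
+ *		while (miniopt(&ex_mo, argv) == 0) {
+ *			if (ex_mo.opt == 'v')
+ *				printf("verbose\n");
+ *			else if (ex_mo.positional && ex_mo.good_int)
+ *				printf("count %d\n", ex_mo.val);
+ *			argv += ex_mo.consumed;
+ *		}
+ *	}
+ */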
diff --git a/drivers/net/wireless/bcm4329/sbutils.c b/drivers/net/wireless/bcm4329/sbutils.c
new file mode 100644
index 0000000..46cd510
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/sbutils.c
@@ -0,0 +1,1004 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c,v 1.662.4.10.2.7.4.2 2010/04/19 05:48:48 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+                     uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+
+#define	SET_SBREG(sii, r, mask, val)	\
+		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
+#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
+#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+	uint8 tmp;
+	uint32 val, intr_val = 0;
+
+
+	/*
+	 * Compact flash has only an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	val = R_REG(sii->osh, sbr);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+	uint8 tmp;
+	volatile uint32 dummy;
+	uint32 intr_val = 0;
+
+
+	/*
+	 * Compact flash has only an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+#ifdef IL_BIGENDIAN
+		dummy = R_REG(sii->osh, sbr);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+#else
+		dummy = R_REG(sii->osh, sbr);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+#endif	/* IL_BIGENDIAN */
+	} else
+		W_REG(sii->osh, sbr, v);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_flag(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 vec;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	if (siflag == -1)
+		vec = 0;
+	else
+		vec = 1 << siflag;
+	W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+	uint i;
+
+	for (i = 0; i < sii->numcores; i ++)
+		if (sba == sii->common_info->coresba[i])
+			return i;
+	return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+	uint32 sbaddr;
+
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS: {
+		sbconfig_t *sb = REGS2SB(sii->curmap);
+		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+		break;
+	}
+
+	case PCI_BUS:
+		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = 0;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		sbaddr  = (uint32)tmp << 12;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		sbaddr |= (uint32)tmp << 16;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		sbaddr |= (uint32)tmp << 24;
+		break;
+	}
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		sbaddr = (uint32)(uintptr)sii->curmap;
+		break;
+
+
+	default:
+		sbaddr = BADCOREADDR;
+		break;
+	}
+
+	return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint sbidh;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+	sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+	return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+	        (val << SBTML_SICF_SHIFT);
+	W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+		        (val << SBTML_SICF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatelow, w);
+	}
+
+	/* return the new value
+	 * for a write operation, the following readback ensures completion of the write operation.
+	 */
+	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+		        (val << SBTMH_SISF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatehigh, w);
+	}
+
+	/* return the new value */
+	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbtmstatelow) &
+	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!sii->common_info->regs[coreidx]) {
+			sii->common_info->regs[coreidx] =
+			    REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
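+	/* Slow path: the register is not directly addressable from here, so block
+	 * interrupts and temporarily switch the current core to 'coreidx'.
+	 */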
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		if (regoff >= SBCONFIGOFF) {
+			w = (R_SBREG(sii, r) & ~mask) | val;
+			W_SBREG(sii, r, w);
+		} else {
+			w = (R_REG(sii->osh, r) & ~mask) | val;
+			W_REG(sii->osh, r, w);
+		}
+	}
+
+	/* readback */
+	if (regoff >= SBCONFIGOFF)
+		w = R_SBREG(sii, r);
+	else {
+		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+		    (coreidx == SI_CC_IDX) &&
+		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+			w = val;
+		} else
+			w = R_REG(sii->osh, r);
+	}
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			sb_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES	2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+	uint next;
+	uint ncc = 0;
+	uint i;
+
+	if (bus >= SB_MAXBUSES) {
+		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+		return 0;
+	}
+	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+	/* Scan all cores on the bus starting from core 0.
+	 * Core addresses must be contiguous on each bus.
+	 */
+	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+		sii->common_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+		/* keep and reuse the initial register mapping */
+		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) &&
+			(sii->common_info->coresba[next] == sba)) {
+			SI_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+			sii->common_info->regs[next] = regs;
+		}
+
+		/* change core to 'next' and read its coreid */
+		sii->curmap = _sb_setcoreidx(sii, next);
+		sii->curidx = next;
+
+		sii->common_info->coreid[next] = sb_coreid(&sii->pub);
+
+		/* core specific processing... */
+		/* chipc provides # cores */
+		if (sii->common_info->coreid[next] == CC_CORE_ID) {
+			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+			uint32 ccrev = sb_corerev(&sii->pub);
+
+			/* determine numcores - this is the total # cores in the chip */
+			if (((ccrev == 4) || (ccrev >= 6)))
+				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+				        CID_CC_SHIFT;
+			else {
+				/* Older chips */
+				uint chip = sii->pub.chip;
+
+				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
+					numcores = 6;
+				else if (chip == BCM4704_CHIP_ID)
+					numcores = 9;
+				else if (chip == BCM5365_CHIP_ID)
+					numcores = 7;
+				else {
+					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+					          chip));
+					ASSERT(0);
+					numcores = 1;
+				}
+			}
+			SI_MSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+				sii->pub.issim ? "QT" : ""));
+		}
+		/* scan bridged SB(s) and add results to the end of the list */
+		else if (sii->common_info->coreid[next] == OCP_CORE_ID) {
+			sbconfig_t *sb = REGS2SB(sii->curmap);
+			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+			uint nsbcc;
+
+			sii->numcores = next + 1;
+
+			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+				continue;
+			nsbba &= 0xfffff000;
+			if (_sb_coreidx(sii, nsbba) != BADIDX)
+				continue;
+
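+			/* the number of cores behind the bridge is advertised in
+			 * bits 19:16 of the bridge's sbtmstatehigh register
+			 */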
+			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+			if (sbba == SI_ENUM_BASE)
+				numcores -= nsbcc;
+			ncc += nsbcc;
+		}
+	}
+
+	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+	sii->numcores = i + ncc;
+	return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii;
+	uint32 origsba;
+
+	sii = SI_INFO(sih);
+
+	/* Save the current core info and validate it later till we know
+	 * for sure what is good and what is bad.
+	 */
+	origsba = _sb_coresba(sii);
+
+	/* scan all SB(s) starting from SI_ENUM_BASE */
+	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	sii->curmap = _sb_setcoreidx(sii, coreidx);
+	sii->curidx = coreidx;
+
+	return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+	uint32 sbaddr = sii->common_info->coresba[coreidx];
+	void *regs;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!sii->common_info->regs[coreidx]) {
+			sii->common_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+		}
+		regs = sii->common_info->regs[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+		regs = sii->curmap;
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = (sbaddr >> 12) & 0x0f;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		tmp = (sbaddr >> 16) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		tmp = (sbaddr >> 24) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		regs = sii->curmap;
+		break;
+	}
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* map new one */
+		if (!sii->common_info->regs[coreidx]) {
+			sii->common_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
+		}
+		regs = sii->common_info->regs[coreidx];
+		break;
+
+
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+	sbconfig_t *sb;
+	volatile uint32 *addrm;
+
+	sb = REGS2SB(sii->curmap);
+
+	switch (asidx) {
+	case 0:
+		addrm =  &sb->sbadmatch0;
+		break;
+
+	case 1:
+		addrm =  &sb->sbadmatch1;
+		break;
+
+	case 2:
+		addrm =  &sb->sbadmatch2;
+		break;
+
+	case 3:
+		addrm =  &sb->sbadmatch3;
+		break;
+
+	default:
+		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+		return 0;
+	}
+
+	return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	/* + 1 because of enumeration space */
+	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+
+	sii = SI_INFO(sih);
+
+	origidx = sii->curidx;
+	ASSERT(GOODIDX(origidx));
+
+	INTR_OFF(sii, intr_val);
+
+	/* switch over to the chipcommon core; it is expected to be present */
+	if (sii->pub.ccrev != NOREV) {
+		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+		/* do the buffered registers update */
+		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+	} else
+		ASSERT(0);
+
+	/* restore core index */
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/* if core is already in reset, just return */
+	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+		return;
+
+	/* if clocks are not enabled, put into reset and return */
+	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+		goto disable;
+
+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
+	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+		dummy = R_SBREG(sii, &sb->sbimstate);
+		OSL_DELAY(1);
+		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+	}
+
+	/* set reset and reject while enabling the clocks */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_REJ | SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(10);
+
+	/* don't forget to clear the initiator reject bit */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+	/* leave reset and reject asserted */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	sb_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+
+	/* set reset while enabling the clock and forcing them on throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+		W_SBREG(sii, &sb->sbtmstatehigh, 0);
+	}
+	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+
+	/* leave clock enabled */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+}
+
+void
+sb_core_tofixup(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	if ((BUSTYPE(sii->pub.bustype) != PCI_BUS) || PCIE(sii) ||
+	    (PCI(sii) && (sii->pub.buscorerev >= 5)))
+		return;
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		SET_SBREG(sii, &sb->sbimconfiglow,
+		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
+		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
+	} else {
+		if (sb_coreid(sih) == PCI_CORE_ID) {
+			SET_SBREG(sii, &sb->sbimconfiglow,
+			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
+			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
+		} else {
+			SET_SBREG(sii, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
+		}
+	}
+
+	sb_commit(sih);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ *	SI_BUS => mips
+ *	JTAG_BUS => chipc
+ *	PCI_BUS => pci or pcie
+ *	PCMCIA_BUS => pcmcia
+ *	SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 tmp, ret = 0xffffffff;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	if ((to & ~TO_MASK) != 0)
+		return ret;
+
+	/* Figure out the master core */
+	if (idx == BADIDX) {
+		switch (BUSTYPE(sii->pub.bustype)) {
+		case PCI_BUS:
+			idx = sii->pub.buscoreidx;
+			break;
+		case JTAG_BUS:
+			idx = SI_CC_IDX;
+			break;
+		case PCMCIA_BUS:
+		case SDIO_BUS:
+			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+			break;
+		case SI_BUS:
+			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+			break;
+		default:
+			ASSERT(0);
+		}
+		if (idx == BADIDX)
+			return ret;
+	}
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+	tmp = R_SBREG(sii, &sb->sbimconfiglow);
+	ret = tmp & TO_MASK;
+	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+	sb_commit(sih);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret;
+}
+
+uint32
+sb_base(uint32 admatch)
+{
+	uint32 base;
+	uint type;
+
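+	/* the admatch type field selects which of the three base/size encodings is used */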
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	base = 0;
+
+	if (type == 0) {
+		base = admatch & SBAM_BASE0_MASK;
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE1_MASK;
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE2_MASK;
+	}
+
+	return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+	uint32 size;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	size = 0;
+
+	if (type == 0) {
+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+	}
+
+	return (size);
+}
diff --git a/drivers/net/wireless/bcm4329/siutils.c b/drivers/net/wireless/bcm4329/siutils.c
new file mode 100644
index 0000000..1814db0
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/siutils.c
@@ -0,0 +1,1527 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c,v 1.662.4.4.4.16.4.28 2010/06/23 21:37:54 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <hndpmu.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                              uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs);
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+static void *common_info_alloced = NULL;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a pointer area for "environment" variables
+ * varsz - pointer to int to return the size of the vars
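+ *
+ * A minimal usage sketch (devid/osh/regs here are caller-supplied assumptions):
+ *	si_t *sih = si_attach(devid, osh, regs, PCI_BUS, NULL, &vars, &varsz);
+ *	if (sih != NULL) {
+ *		... access cores via si_setcore()/si_corereg() ...
+ *		si_detach(sih);
+ *	}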
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	si_info_t *sii;
+
+	/* alloc si_info_t */
+	if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		return (NULL);
+	}
+
+	if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+		if (NULL != sii->common_info)
+			MFREE(osh, sii->common_info, sizeof(si_common_info_t));
+		MFREE(osh, sii, sizeof(si_info_t));
+		return (NULL);
+	}
+	sii->vars = vars ? *vars : NULL;
+	sii->varsz = varsz ? *varsz : 0;
+
+	return (si_t *)sii;
+}
+
+/* global kernel resource */
+static si_info_t ksii;
+
+static uint32	wd_msticks;		/* watchdog timer ticks normalized to ms */
+
+/* generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+	static bool ksii_attached = FALSE;
+
+	if (!ksii_attached) {
+		void *regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+
+		if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+		                SI_BUS, NULL,
+		                osh != SI_OSH ? &ksii.vars : NULL,
+		                osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
+			if (NULL != ksii.common_info)
+				MFREE(osh, ksii.common_info, sizeof(si_common_info_t));
+			SI_ERROR(("si_kattach: si_doattach failed\n"));
+			REG_UNMAP(regs);
+			return NULL;
+		}
+		REG_UNMAP(regs);
+
+		/* save ticks normalized to ms for si_watchdog_ms() */
+		if (PMUCTL_ENAB(&ksii.pub)) {
+			/* based on 32KHz ILP clock */
+			wd_msticks = 32;
+		} else {
+			wd_msticks = ALP_CLOCK / 1000;
+		}
+
+		ksii_attached = TRUE;
+		SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+		        ksii.pub.ccrev, wd_msticks));
+	}
+
+	return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	/* the memseg flag must be set for CF cards before any sb register accesses */
+	if (BUSTYPE(bustype) == PCMCIA_BUS)
+		sii->memseg = TRUE;
+
+
+	if (BUSTYPE(bustype) == SDIO_BUS) {
+		int err;
+		uint8 clkset;
+
+		/* Try forcing SDIO core to do ALPAvail request only */
+		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+		if (!err) {
+			uint8 clkval;
+
+			/* If register supported, wait for ALPAvail and then force ALP */
+			clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+			if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+				SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+					PMU_MAX_TRANSITION_DLY);
+				if (!SBSDIO_ALPAV(clkval)) {
+					SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+						clkval));
+					return FALSE;
+				}
+				clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					clkset, &err);
+				OSL_DELAY(65);
+			}
+		}
+
+		/* Also, disable the extra SDIO pull-ups */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	}
+
+
+	return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs)
+{
+	bool pci, pcie;
+	uint i;
+	uint pciidx, pcieidx, pcirev, pcierev;
+
+	cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+	/* get chipcommon chipstatus */
+	if (sii->pub.ccrev >= 11)
+		sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilities */
+	sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+
+	/* get pmu rev and caps */
+	if (sii->pub.cccaps & CC_CAP_PMU) {
+		sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+	}
+
+	SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+		sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+		sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+	sii->pub.buscoretype = NODEV_CORE_ID;
+	sii->pub.buscorerev = NOREV;
+	sii->pub.buscoreidx = BADIDX;
+
+	pci = pcie = FALSE;
+	pcirev = pcierev = NOREV;
+	pciidx = pcieidx = BADIDX;
+
+	for (i = 0; i < sii->numcores; i++) {
+		uint cid, crev;
+
+		si_setcoreidx(&sii->pub, i);
+		cid = si_coreid(&sii->pub);
+		crev = si_corerev(&sii->pub);
+
+		/* Display cores found */
+		SI_MSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+		        i, cid, crev, sii->common_info->coresba[i], sii->common_info->regs[i]));
+
+		if (BUSTYPE(bustype) == PCI_BUS) {
+			if (cid == PCI_CORE_ID) {
+				pciidx = i;
+				pcirev = crev;
+				pci = TRUE;
+			} else if (cid == PCIE_CORE_ID) {
+				pcieidx = i;
+				pcierev = crev;
+				pcie = TRUE;
+			}
+		} else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+		           (cid == PCMCIA_CORE_ID)) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+		          (BUSTYPE(bustype) == SPI_BUS)) &&
+		         ((cid == PCMCIA_CORE_ID) ||
+		          (cid == SDIOD_CORE_ID))) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+
+		/* find the index of the core that was selected before entering this function */
+		if ((savewin && (savewin == sii->common_info->coresba[i])) ||
+		    (regs == sii->common_info->regs[i]))
+			*origidx = i;
+	}
+
+
+	SI_MSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+		sii->pub.buscorerev));
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+	    (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (sii->pub.chiprev <= 3))
+		OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+	/* Make sure any on-chip ARM is off (in case the strapping is wrong or downloaded
+	 * code was already running).
+	 */
+	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+		    si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+			si_core_disable(&sii->pub, 0);
+	}
+
+	/* return to the original core */
+	si_setcoreidx(&sii->pub, *origidx);
+
+	return TRUE;
+}
+
+
+
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	struct si_pub *sih = &sii->pub;
+	uint32 w, savewin;
+	chipcregs_t *cc;
+	char *pvars = NULL;
+	uint origidx;
+
+	ASSERT(GOODREGS(regs));
+
+	bzero((uchar*)sii, sizeof(si_info_t));
+
+
+	{
+		if (NULL == (common_info_alloced = (void *)MALLOC(osh, sizeof(si_common_info_t)))) {
+			SI_ERROR(("si_doattach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+			return (NULL);
+		}
+		bzero((uchar*)(common_info_alloced), sizeof(si_common_info_t));
+	}
+	sii->common_info = (si_common_info_t *)common_info_alloced;
+	sii->common_info->attach_count++;
+
+	savewin = 0;
+
+	sih->buscoreidx = BADIDX;
+
+	sii->curmap = regs;
+	sii->sdh = sdh;
+	sii->osh = osh;
+
+
+	/* find Chipcommon address */
+	if (bustype == PCI_BUS) {
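+		/* remember the current BAR0 window so the original core can be located
+		 * later (si_buscore_setup matches 'savewin' to recover origidx), then
+		 * point BAR0 at the enumeration space to reach chipcommon
+		 */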
+		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+			savewin = SI_ENUM_BASE;
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+		cc = (chipcregs_t *)regs;
+	} else
+	if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+		cc = (chipcregs_t *)sii->curmap;
+	} else {
+		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+	}
+
+	sih->bustype = bustype;
+	if (bustype != BUSTYPE(bustype)) {
+		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+			bustype, BUSTYPE(bustype)));
+		return NULL;
+	}
+
+	/* bus/core/clk setup for register access */
+	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+		SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+		return NULL;
+	}
+
+	/* ChipID recognition.
+	 *   We assume we can read chipid at offset 0 from the regs arg.
+	 *   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+	 *   some way of recognizing them needs to be added here.
+	 */
+	w = R_REG(osh, &cc->chipid);
+	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as well fill in chip id, rev & pkg */
+	sih->chip = w & CID_ID_MASK;
+	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chippkg != BCM4329_289PIN_PKG_ID))
+		sih->chippkg = BCM4329_182PIN_PKG_ID;
+	sih->issim = IS_SIM(sih->chippkg);
+
+	/* scan for cores */
+	if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+		SI_MSG(("Found chip type SB (0x%08x)\n", w));
+		sb_scan(&sii->pub, regs, devid);
+	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+		SI_MSG(("Found chip type AI (0x%08x)\n", w));
+		/* pass chipc address instead of original core base */
+		ai_scan(&sii->pub, (void *)cc, devid);
+	} else {
+		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+		return NULL;
+	}
+	/* no cores found, bail out */
+	if (sii->numcores == 0) {
+		SI_ERROR(("si_doattach: could not find any cores\n"));
+		return NULL;
+	}
+	/* bus/core/clk setup */
+	origidx = SI_CC_IDX;
+	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+		return NULL;
+	}
+
+	pvars = NULL;
+
+
+
+	if (sii->pub.ccrev >= 20) {
+		cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		W_REG(osh, &cc->gpiopullup, 0);
+		W_REG(osh, &cc->gpiopulldown, 0);
+		si_setcoreidx(sih, origidx);
+	}
+
+	/* Skip PMU initialization from the Dongle Host.
+	 * Firmware will take care of it when it comes up.
+	 */
+
+
+
+	return (sii);
+}
+
+/* may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+	si_info_t *sii;
+	uint idx;
+
+	sii = SI_INFO(sih);
+
+	if (sii == NULL)
+		return;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS)
+		for (idx = 0; idx < SI_MAXCORES; idx++)
+			if (sii->common_info->regs[idx]) {
+				REG_UNMAP(sii->common_info->regs[idx]);
+				sii->common_info->regs[idx] = NULL;
+			}
+
+
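+	/* the shared common info is refcounted; free it on the last detach */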
+	if (1 == sii->common_info->attach_count--) {
+		MFREE(sii->osh, sii->common_info, sizeof(si_common_info_t));
+		common_info_alloced = NULL;
+	}
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (sii != &ksii)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sii->osh != NULL) {
+		SI_ERROR(("osh is already set....\n"));
+		ASSERT(!sii->osh);
+	}
+	sii->osh = osh;
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+                          void *intrsenabled_fn, void *intr_arg)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intr_arg = intr_arg;
+	sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+	sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+	sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* save the current core id.  when this function is called, the current core
+	 * must be the core which provides the driver functions (il, et, wl, etc.)
+	 */
+	sii->dev_coreid = sii->common_info->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intrsoff_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	if (CHIPTYPE(sih->socitype) == SOCI_SB) {
+		sbconfig_t *ccsbr = (sbconfig_t *)((uintptr)((ulong)
+			    (sii->common_info->coresba[SI_CC_IDX]) + SBCONFIGOFF));
+		return R_REG(sii->osh, &ccsbr->sbflagst);
+	} else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return R_REG(sii->osh, ((uint32 *)(uintptr)
+			    (sii->common_info->oob_router + OOB_STATUSA)));
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_flag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_flag(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_setint(sih, siflag);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_setint(sih, siflag);
+	else
+		ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->common_info->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->curidx;
+}
+
+/* return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+	si_info_t *sii;
+	uint idx;
+	uint coreid;
+	uint coreunit;
+	uint i;
+
+	sii = SI_INFO(sih);
+	coreunit = 0;
+
+	idx = sii->curidx;
+
+	ASSERT(GOODREGS(sii->curmap));
+	coreid = si_coreid(sih);
+
+	/* count the cores of our type */
+	for (i = 0; i < idx; i++)
+		if (sii->common_info->coreid[i] == coreid)
+			coreunit++;
+
+	return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corevendor(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_corevendor(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+	return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corerev(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_corerev(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+	si_info_t *sii;
+	uint found;
+	uint i;
+
+	sii = SI_INFO(sih);
+
+	found = 0;
+
+	for (i = 0; i < sii->numcores; i++)
+		if (sii->common_info->coreid[i] == coreid) {
+			if (found == coreunit)
+				return (i);
+			found++;
+		}
+
+	return (BADIDX);
+}
+
+/* return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	bcopy((uchar*)sii->common_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+	return (sii->numcores);
+}
+
+/* return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+
+	return (sii->curmap);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+	uint idx;
+
+	idx = si_findcoreidx(sih, coreid, coreunit);
+	if (!GOODIDX(idx))
+		return (NULL);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, idx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_setcoreidx(sih, idx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, coreidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_setcoreidx(sih, coreidx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+/* Turn off interrupts, as required by sb_setcore, before switching cores */
+void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+	void *cc;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	INTR_OFF(sii, *intr_val);
+	*origidx = sii->curidx;
+	cc = si_setcore(sih, coreid, 0);
+	ASSERT(cc != NULL);
+
+	return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	si_setcoreidx(sih, coreid);
+	INTR_RESTORE(sii, intr_val);
+}
+
+int
+si_numaddrspaces(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_numaddrspaces(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_numaddrspaces(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspace(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_addrspace(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspacesize(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_addrspacesize(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_cflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_core_cflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_cflags_wo(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_core_cflags_wo(sih, mask, val);
+	else
+		ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_sflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_core_sflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_iscoreup(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_iscoreup(sih);
+	else {
+		ASSERT(0);
+		return FALSE;
+	}
+}
+
+void
+si_write_wrapperreg(si_t *sih, uint32 offset, uint32 val)
+{
+	/* only for 4319, no requirement for SOCI_SB */
+	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+		ai_write_wrap_reg(sih, offset, val);
+	}
+	else
+		return;
+
+	return;
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg(sih, coreidx, regoff, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_corereg(sih, coreidx, regoff, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_disable(sih, bits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_reset(sih, bits, resetbits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_core_reset(sih, bits, resetbits);
+}
+
+void
+si_core_tofixup(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_tofixup(sih);
+}
+
+/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+	uint32 cflags;
+	int result = 0;
+
+	/* Read core control flags */
+	cflags = si_core_cflags(sih, 0, 0);
+
+	/* Set bist & fgc */
+	si_core_cflags(sih, 0, (SICF_BIST_EN | SICF_FGC));
+
+	/* Wait for bist done */
+	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+		result = BCME_ERROR;
+
+	/* Reset core control flags */
+	si_core_cflags(sih, 0xffff, cflags);
+
+	return result;
+}
+
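+/* translate a clockcontrol factor-6 field encoding into its numeric value
+ * (returns 0 for encodings not covered by the table)
+ */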
+static uint32
+factor6(uint32 x)
+{
+	switch (x) {
+	case CC_F6_2:	return 2;
+	case CC_F6_3:	return 3;
+	case CC_F6_4:	return 4;
+	case CC_F6_5:	return 5;
+	case CC_F6_6:	return 6;
+	case CC_F6_7:	return 7;
+	default:	return 0;
+	}
+}
+
+/* calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+	uint32 n1, n2, clock, m1, m2, m3, mc;
+
+	n1 = n & CN_N1_MASK;
+	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+	if (pll_type == PLL_TYPE6) {
+		if (m & CC_T6_MMASK)
+			return CC_T6_M1;
+		else
+			return CC_T6_M0;
+	} else if ((pll_type == PLL_TYPE1) ||
+	           (pll_type == PLL_TYPE3) ||
+	           (pll_type == PLL_TYPE4) ||
+	           (pll_type == PLL_TYPE7)) {
+		n1 = factor6(n1);
+		n2 += CC_F5_BIAS;
+	} else if (pll_type == PLL_TYPE2) {
+		n1 += CC_T2_BIAS;
+		n2 += CC_T2_BIAS;
+		ASSERT((n1 >= 2) && (n1 <= 7));
+		ASSERT((n2 >= 5) && (n2 <= 23));
+	} else if (pll_type == PLL_TYPE5) {
+		return (100000000);
+	} else
+		ASSERT(0);
+	/* PLL types 3 and 7 use BASE2 (25 MHz) */
+	if ((pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE7)) {
+		clock = CC_CLOCK_BASE2 * n1 * n2;
+	} else
+		clock = CC_CLOCK_BASE1 * n1 * n2;
+
+	if (clock == 0)
+		return 0;
+
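+	/* pull out the three M dividers and the mux-control field that selects
+	 * which of them are applied to the base clock
+	 */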
+	m1 = m & CC_M1_MASK;
+	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+	if ((pll_type == PLL_TYPE1) ||
+	    (pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE4) ||
+	    (pll_type == PLL_TYPE7)) {
+		m1 = factor6(m1);
+		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+			m2 += CC_F5_BIAS;
+		else
+			m2 = factor6(m2);
+		m3 = factor6(m3);
+
+		switch (mc) {
+		case CC_MC_BYPASS:	return (clock);
+		case CC_MC_M1:		return (clock / m1);
+		case CC_MC_M1M2:	return (clock / (m1 * m2));
+		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
+		case CC_MC_M1M3:	return (clock / (m1 * m3));
+		default:		return (0);
+		}
+	} else {
+		ASSERT(pll_type == PLL_TYPE2);
+
+		m1 += CC_T2_BIAS;
+		m2 += CC_T2M2_BIAS;
+		m3 += CC_T2_BIAS;
+		ASSERT((m1 >= 2) && (m1 <= 7));
+		ASSERT((m2 >= 3) && (m2 <= 10));
+		ASSERT((m3 >= 2) && (m3 <= 7));
+
+		if ((mc & CC_T2MC_M1BYP) == 0)
+			clock /= m1;
+		if ((mc & CC_T2MC_M2BYP) == 0)
+			clock /= m2;
+		if ((mc & CC_T2MC_M3BYP) == 0)
+			clock /= m3;
+
+		return (clock);
+	}
+}
+
+
+/* set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+	if (PMUCTL_ENAB(sih)) {
+
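+		/* chip-specific workaround: rev 0 of the 4319 needs clk_ctl_st poked
+		 * and its USB20D core disabled before the PMU watchdog is armed
+		 */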
+		if ((sih->chip == BCM4319_CHIP_ID) && (sih->chiprev == 0) && (ticks != 0)) {
+			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+			si_setcore(sih, USB20D_CORE_ID, 0);
+			si_core_disable(sih, 1);
+			si_setcore(sih, CC_CORE_ID, 0);
+		}
+
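+		/* never program a 1-tick PMU watchdog value; bump it to the minimum of 2 */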
+		if (ticks == 1)
+			ticks = 2;
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+	} else {
+		/* instant NMI */
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+	}
+}
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+/* trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	si_watchdog(sih, wd_msticks * ms);
+}
+#endif
+
+
+
+/* initialize the sdio core */
+void
+si_sdio_init(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (((sih->buscoretype == PCMCIA_CORE_ID) && (sih->buscorerev >= 8)) ||
+	    (sih->buscoretype == SDIOD_CORE_ID)) {
+		uint idx;
+		sdpcmd_regs_t *sdpregs;
+
+		/* get the current core index */
+		idx = sii->curidx;
+		ASSERT(idx == si_findcoreidx(sih, D11_CORE_ID, 0));
+
+		/* switch to sdio core */
+		if (!(sdpregs = (sdpcmd_regs_t *)si_setcore(sih, PCMCIA_CORE_ID, 0)))
+			sdpregs = (sdpcmd_regs_t *)si_setcore(sih, SDIOD_CORE_ID, 0);
+		ASSERT(sdpregs);
+
+		SI_MSG(("si_sdio_init: For PCMCIA/SDIO Corerev %d, enable ints from core %d "
+		        "through SD core %d (%p)\n",
+		        sih->buscorerev, idx, sii->curidx, sdpregs));
+
+		/* enable backplane error and core interrupts */
+		W_REG(sii->osh, &sdpregs->hostintmask, I_SBINT);
+		W_REG(sii->osh, &sdpregs->sbintmask, (I_SB_SERR | I_SB_RESPERR | (1 << idx)));
+
+		/* switch back to previous core */
+		si_setcoreidx(sih, idx);
+	}
+
+	/* enable interrupts */
+	bcmsdh_intr_enable(sii->sdh);
+
+}
+
+
+/* change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+	return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/* mask&set gpiocontrol bits */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
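+		/* priority callers are confined to the reserved gpios, while
+		 * non-priority callers may only touch unreserved gpios
+		 */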
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioouten);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioout);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	/* only cores on SI_BUS share GPIOs, and only application users need to
+	 * reserve/release GPIOs
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return -1;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return -1;
+	}
+
+	/* already reserved */
+	if (si_gpioreservation & gpio_bitmask)
+		return -1;
+	/* set reservation */
+	si_gpioreservation |= gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/* release one gpio */
+/*
+ * releasing the gpio doesn't change the current value on the GPIO last write value
+ * persists till some one overwrites it
+ */
+
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	/* only cores on SI_BUS share GPIOs, and only application users need to
+	 * reserve/release GPIOs
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return -1;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return -1;
+	}
+
+	/* already released */
+	if (!(si_gpioreservation & gpio_bitmask))
+		return -1;
+
+	/* clear reservation */
+	si_gpioreservation &= ~gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+	si_info_t *sii;
+	uint regoff;
+
+	sii = SI_INFO(sih);
+	regoff = 0;
+
+	regoff = OFFSETOF(chipcregs_t, gpioin);
+	return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	si_info_t *sii;
+	uint regoff;
+
+	sii = SI_INFO(sih);
+	regoff = 0;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	si_info_t *sii;
+	uint regoff;
+
+	sii = SI_INFO(sih);
+	regoff = 0;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointmask);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 16)
+		return -1;
+
+	/* gpio led powersave reg */
+	return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (sih->ccrev < 16)
+		return -1;
+
+	return (si_corereg(sih, SI_CC_IDX,
+		OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	uint offs;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 20)
+		return -1;
+
+	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	uint offs;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return -1;
+
+	if (regtype == GPIO_REGEVT)
+		offs = OFFSETOF(chipcregs_t, gpioevent);
+	else if (regtype == GPIO_REGEVT_INTMSK)
+		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+	else if (regtype == GPIO_REGEVT_INTPOL)
+		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+	else
+		return -1;
+
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+	bool level, gpio_handler_t cb, void *arg)
+{
+	si_info_t *sii;
+	gpioh_item_t *gi;
+
+	ASSERT(event);
+	ASSERT(cb != NULL);
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return NULL;
+
+	if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+		return NULL;
+
+	bzero(gi, sizeof(gpioh_item_t));
+	gi->event = event;
+	gi->handler = cb;
+	gi->arg = arg;
+	gi->level = level;
+
+	gi->next = sii->gpioh_head;
+	sii->gpioh_head = gi;
+
+	return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+	si_info_t *sii;
+	gpioh_item_t *p, *n;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return;
+
+	ASSERT(sii->gpioh_head != NULL);
+	if ((void*)sii->gpioh_head == gpioh) {
+		sii->gpioh_head = sii->gpioh_head->next;
+		MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+		return;
+	} else {
+		p = sii->gpioh_head;
+		n = p->next;
+		while (n) {
+			if ((void*)n == gpioh) {
+				p->next = n->next;
+				MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+				return;
+			}
+			p = n;
+			n = n->next;
+		}
+	}
+
+	ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+	si_info_t *sii;
+	gpioh_item_t *h;
+	uint32 status;
+	uint32 level = si_gpioin(sih);
+	uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+
+	sii = SI_INFO(sih);
+	for (h = sii->gpioh_head; h != NULL; h = h->next) {
+		if (h->handler) {
+			status = (h->level ? level : edge);
+
+			if (status & h->event)
+				h->handler(status, h->arg);
+		}
+	}
+
+	si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+	si_info_t *sii;
+	uint offs;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return -1;
+
+	offs = OFFSETOF(chipcregs_t, intmask);
+	return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+
+/* Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	sii = SI_INFO(sih);
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev == 0)
+		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+	else if (corerev < 3) {
+		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+	} else {
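+		/* rev >= 3: 'nb' banks of 2^(bsz + SR_BSZ_BASE) bytes each, plus an
+		 * optional last bank whose size is given separately by 'lss'
+		 */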
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+		if (lss != 0)
+			nb --;
+		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+		if (lss != 0)
+			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+void
+si_btcgpiowar(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+	chipcregs_t *cc;
+
+	sii = SI_INFO(sih);
+
+	/* Make sure that a ChipCommon core is present and
+	 * UART_TX is strapped to 1
+	 */
+	if (!(sih->cccaps & CC_CAP_UARTGPIO))
+		return;
+
+	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(cc != NULL);
+
+	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+/* check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+	uint32 w;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case PCI_BUS:
+		ASSERT(sii->osh != NULL);
+		w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+		if ((w & 0xFFFF) != VENDOR_BROADCOM)
+			return TRUE;
+		else
+			return FALSE;
+	default:
+		return FALSE;
+	}
+	return FALSE;
+}
diff --git a/drivers/net/wireless/bcm4329/siutils_priv.h b/drivers/net/wireless/bcm4329/siutils_priv.h
new file mode 100644
index 0000000..e8ad7e5
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/siutils_priv.h
@@ -0,0 +1,213 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h,v 1.3.10.5.4.2 2009/09/22 13:28:16 Exp $
+ */
+
+#ifndef	_siutils_priv_h_
+#define	_siutils_priv_h_
+
+/* debug/trace */
+#define	SI_ERROR(args)
+
+#define	SI_MSG(args)
+
+#define	IS_SIM(chippkg)	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+	void			*arg;
+	bool			level;
+	gpio_handler_t		handler;
+	uint32			event;
+	struct gpioh_item	*next;
+} gpioh_item_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_common_info {
+	void	*regs[SI_MAXCORES];	/* other regs va */
+	void	*regs2[SI_MAXCORES];	/* va of each core second register set (usbh20) */
+	uint	coreid[SI_MAXCORES];	/* id of each core */
+	uint32	cia[SI_MAXCORES];	/* erom cia entry for each core */
+	uint32	cib[SI_MAXCORES];	/* erom cib entry for each core */
+	uint32	coresba_size[SI_MAXCORES]; /* backplane address space size */
+	uint32	coresba2_size[SI_MAXCORES]; /* second address space size */
+	uint32	coresba[SI_MAXCORES];	/* backplane address of each core */
+	uint32	coresba2[SI_MAXCORES];	/* address of each core second register set (usbh20) */
+	void	*wrappers[SI_MAXCORES];	/* other cores wrapper va */
+	uint32	wrapba[SI_MAXCORES];	/* address of controlling wrapper */
+	uint32	oob_router;		/* oob router registers for axi */
+	uint8	attach_count;
+} si_common_info_t;
+
+typedef struct si_info {
+	struct si_pub pub;		/* backplane public state (must be first field) */
+
+	void	*osh;			/* osl os handle */
+	void	*sdh;			/* bcmsdh handle */
+	void *pch;			/* PCI/E core handle */
+	uint	dev_coreid;		/* the core that provides driver functions */
+	void	*intr_arg;		/* interrupt callback function arg */
+	si_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
+	si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+	si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+
+	gpioh_item_t *gpioh_head; 	/* GPIO event handlers list */
+
+	bool	memseg;			/* flag to toggle MEM_SEG register */
+
+	char *vars;
+	uint varsz;
+
+	void	*curmap;		/* current regs va */
+
+	uint	curidx;			/* current core index */
+	uint	numcores;		/* # discovered cores */
+	void	*curwrap;		/* current wrapper va */
+	si_common_info_t	*common_info;	/* Common information for all the cores in a chip */
+} si_info_t;
+
+#define	SI_INFO(sih)	(si_info_t *)(uintptr)sih
+
+#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+		ISALIGNED((x), SI_CORE_SIZE))
+#define	GOODREGS(regs)	((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR	0
+#define	GOODIDX(idx)	(((uint)idx) < SI_MAXCORES)
+#define	BADIDX		(SI_MAXCORES + 1)
+#define	NOREV		-1		/* Invalid rev */
+
+#define PCI(si)		((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCI_CORE_ID))
+#define PCIE(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE_CORE_ID))
+#define PCMCIA(si)	((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access the PCI/PCIE and CC cores without having to change
+ * the PCI BAR0 window.
+ */
+#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) ||	\
+		     (((si)->pub.buscoretype == PCI_CORE_ID) && (si)->pub.buscorerev >= 13))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
+ * before/after core switching, to avoid invalid register access inside an ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+	if ((si)->intrsoff_fn && (si)->common_info->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+	if ((si)->intrsrestore_fn && (si)->common_info->coreid[(si)->curidx] == (si)->dev_coreid) {\
+		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
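+/*
+ * Typical usage brackets a temporary core switch:
+ *	INTR_OFF(sii, intr_val);
+ *	... si_setcore()/register access ...
+ *	INTR_RESTORE(sii, intr_val);
+ */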
+
+/* dynamic clock control defines */
+#define	LPOMINFREQ		25000		/* low power oscillator min */
+#define	LPOMAXFREQ		43000		/* low power oscillator max */
+#define	XTALMINFREQ		19800000	/* 20 MHz - 1% */
+#define	XTALMAXFREQ		20200000	/* 20 MHz + 1% */
+#define	PCIMINFREQ		25000000	/* 25 MHz */
+#define	PCIMAXFREQ		34000000	/* 33 MHz + fudge */
+
+#define	ILP_DIV_5MHZ		0		/* ILP = 5 MHz */
+#define	ILP_DIV_1MHZ		4		/* ILP = 1 MHz */
+
+#define PCI_FORCEHT(si)	\
+	(((PCIE(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+	((PCI(si) || PCIE(si)) && (si->pub.chip == BCM4321_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME	10		/* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME	90		/* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_tofixup(si_t *sih);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_write_wrap_reg(si_t *sih, uint32 offset, uint32 val);
+
+
+#endif	/* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcm4329/wl_iw.c b/drivers/net/wireless/bcm4329/wl_iw.c
new file mode 100644
index 0000000..230619d
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/wl_iw.c
@@ -0,0 +1,8441 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c,v 1.51.4.9.2.6.4.142.4.78 2011/02/11 21:27:52 Exp $
+ */
+
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+
+typedef void wlc_info_t;
+typedef void wl_info_t;
+typedef const struct si_pub  si_t;
+#include <wlioctl.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+#define WL_ASSOC(x)
+#define WL_INFORM(x)
+#define WL_WSEC(x)
+#define WL_SCAN(x)
+#define WL_PNO(x)
+#define WL_TRACE_COEX(x)
+
+#include <wl_iw.h>
+
+
+
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1	0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4	0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+
+
+#define IW_WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+
+#include <linux/rtnetlink.h>
+#include <linux/mutex.h>
+
+#define WL_IW_USE_ISCAN  1
+#define ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS  1
+
+#if defined(SOFTAP)
+#define WL_SOFTAP(x) printk x
+static struct net_device *priv_dev;
+static bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+static long ap_cfg_pid = -1;
+struct net_device *ap_net_dev = NULL;
+struct semaphore  ap_eth_sema;
+static struct completion ap_cfg_exited;
+static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap);
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac);
+#endif
+
+#define WL_IW_IOCTL_CALL(func_call) \
+	do {				\
+		func_call;		\
+	} while (0)
+
+static int		g_onoff = G_WLAN_SET_ON;
+wl_iw_extra_params_t	g_wl_iw_params;
+static struct mutex	wl_cache_lock;
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+	uint32 reason, char* stringBuf, uint buflen);
+#include <bcmsdbus.h>
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+extern void dhd_dev_init_ioctl(struct net_device *dev);
+int dev_iw_write_cfg1_bss_var(struct net_device *dev, int val);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+
+#if defined(IL_BIGENDIAN)
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+#endif
+
+#ifdef CONFIG_WIRELESS_EXT
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#endif 
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)
+#endif 
+
+static void *g_scan = NULL;
+static volatile uint g_scan_specified_ssid;
+static wlc_ssid_t g_specific_ssid;
+
+static wlc_ssid_t g_ssid;
+
+bool btcoex_is_sco_active(struct net_device *dev);
+static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl;
+#if defined(CONFIG_FIRST_SCAN)
+static volatile uint g_first_broadcast_scan;
+static volatile uint g_first_counter_scans;
+#define MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN 3
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else 
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0);
+#endif 
+
+#if defined(WL_IW_USE_ISCAN)
+#if !defined(CSCAN)
+static void wl_iw_free_ss_cache(void);
+static int   wl_iw_run_ss_cache_timer(int kick_off);
+#endif
+#if defined(CONFIG_FIRST_SCAN)
+int  wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+#endif
+static int dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len);
+#define ISCAN_STATE_IDLE   0
+#define ISCAN_STATE_SCANING 1
+
+#define WLC_IW_ISCAN_MAXLEN   2048
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	int    iscan_state;
+	iscan_buf_t * list_hdr;
+	iscan_buf_t * list_cur;
+
+	
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+
+	uint32 scan_flag;	
+#if defined CSCAN
+	char ioctlbuf[WLC_IOCTL_MEDLEN];
+#else
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+#endif
+	wl_iscan_params_t *iscan_ex_params_p;
+	int iscan_ex_param_size;
+} iscan_info_t;
+#define COEX_DHCP 1
+
+#define BT_DHCP_eSCO_FIX
+#define BT_DHCP_USE_FLAGS
+#define BT_DHCP_OPPORTUNITY_WINDOW_TIME	 2500
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+static void wl_iw_bt_flag_set(struct net_device *dev, bool set);
+static void wl_iw_bt_release(void);
+
+typedef enum bt_coex_status {
+	BT_DHCP_IDLE = 0,
+	BT_DHCP_START,
+	BT_DHCP_OPPORTUNITY_WINDOW,
+	BT_DHCP_FLAG_FORCE_TIMEOUT
+} coex_status_t;
+
+typedef struct bt_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	bool   dhcp_done;
+	int    bt_state;
+
+	long   bt_pid;
+	struct semaphore bt_sem;
+	struct completion bt_exited;
+} bt_info_t;
+
+bt_info_t *g_bt = NULL;
+static void wl_iw_bt_timerfunc(ulong data);
+iscan_info_t *g_iscan = NULL;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+#endif 
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+);
+
+#ifndef CSCAN
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+);
+
+static uint
+wl_iw_get_scan_prep(
+	wl_scan_results_t *list,
+	struct iw_request_info *info,
+	char *extra,
+	short max_size
+);
+#endif
+
+static void swap_key_from_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
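+/* Wrap a WLC ioctl into a wl_ioctl_t and pass it to the driver's private
+ * SIOCDEVPRIVATE handler on this net_device, holding an OS wake lock for the
+ * duration of the call.  Returns -EINVAL if the device is NULL or the driver
+ * is switched off.
+ */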
+static int
+dev_wlc_ioctl(
+	struct net_device *dev,
+	int cmd,
+	void *arg,
+	int len
+)
+{
+	struct ifreq ifr;
+	wl_ioctl_t ioc;
+	mm_segment_t fs;
+	int ret = -EINVAL;
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return ret;
+	}
+
+	net_os_wake_lock(dev);
+
+	WL_INFORM(("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d ,\n",
+		__FUNCTION__, current->pid, cmd, arg, len));
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		memset(&ioc, 0, sizeof(ioc));
+		ioc.cmd = cmd;
+		ioc.buf = arg;
+		ioc.len = len;
+
+		strcpy(ifr.ifr_name, dev->name);
+		ifr.ifr_data = (caddr_t) &ioc;
+
+		ret = dev_open(dev);
+		if (ret) {
+			WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret));
+			net_os_wake_unlock(dev);
+			return ret;
+		}
+
+		fs = get_fs();
+		set_fs(get_ds());
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
+		ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+		ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+		set_fs(fs);
+	}
+	else {
+		WL_TRACE(("%s: call after driver stop : ignored\n", __FUNCTION__));
+	}
+
+	net_os_wake_unlock(dev);
+
+	return ret;
+}
+
+
+static int
+dev_wlc_intvar_get_reg(
+	struct net_device *dev,
+	char *name,
+	uint  reg,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	len = bcm_mkiovar(name, (char *)(&reg), sizeof(reg), (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+	return (error);
+}
+
+
+static int
+dev_wlc_intvar_set_reg(
+	struct net_device *dev,
+	char *name,
+	char *addr,
+	char * val)
+{
+	char reg_addr[8];
+
+	memset(reg_addr, 0, sizeof(reg_addr));
+	memcpy((char *)&reg_addr[0], (char *)addr, 4);
+	memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+	return (dev_wlc_bufvar_set(dev, name,  (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
+
+static int
+dev_wlc_intvar_set(
+	struct net_device *dev,
+	char *name,
+	int val)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	uint len;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	ASSERT(len);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+#if defined(WL_IW_USE_ISCAN)
+static int
+dev_iw_iovar_setbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+
+	if (iolen == 0)
+		return 0;
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+#endif 
+
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+	struct net_device *dev,
+	char *name,
+	char *buf, int len)
+{
+	static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+	uint buflen;
+
+	buflen = bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf));
+	ASSERT(buflen);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen));
+}
+#endif
+
+
+static int
+dev_wlc_bufvar_get(
+	struct net_device *dev,
+	char *name,
+	char *buf, int buflen)
+{
+	static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+	int error;
+	uint len;
+
+	len = bcm_mkiovar(name, NULL, 0, ioctlbuf, sizeof(ioctlbuf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	if (!error)
+		bcopy(ioctlbuf, buf, buflen);
+
+	return (error);
+}
+
+
+
+static int
+dev_wlc_intvar_get(
+	struct net_device *dev,
+	char *name,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	uint data_null;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+
+	return (error);
+}
+
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_active_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int as = 0;
+	int error = 0;
+	char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+	if (g_iscan->iscan_state == ISCAN_STATE_IDLE)
+#endif 
+		error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+#if defined(WL_IW_USE_ISCAN)
+	else
+		g_iscan->scan_flag = as;
+#endif 
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+static int
+wl_iw_set_passive_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int ps = 1;
+	int error = 0;
+	char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+	if (g_iscan->iscan_state == ISCAN_STATE_IDLE) {
+#endif 
+
+		 
+		if (g_scan_specified_ssid == 0) {
+			error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &ps, sizeof(ps));
+		}
+#if defined(WL_IW_USE_ISCAN)
+	}
+	else
+		g_iscan->scan_flag = ps;
+#endif 
+
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+
+static int
+wl_iw_set_txpower(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+	int txpower = -1;
+
+	txpower = bcm_atoi(extra + strlen(TXPOWER_SET_CMD) + 1);
+	if ((txpower >= 0) && (txpower <= 127)) {
+		txpower |= WL_TXPWR_OVERRIDE;
+		txpower = htod32(txpower);
+
+		error = dev_wlc_intvar_set(dev, "qtxpower", txpower);
+		p += snprintf(p, MAX_WX_STRING, "OK");
+		WL_TRACE(("%s: set TXpower 0x%X is OK\n", __FUNCTION__, txpower));
+	} else {
+		WL_ERROR(("%s: set tx power failed\n", __FUNCTION__));
+		p += snprintf(p, MAX_WX_STRING, "FAIL");
+	}
+
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+static int
+wl_iw_get_macaddr(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error;
+	char buf[128];
+	struct ether_addr *id;
+	char *p = extra;
+
+	
+	strcpy(buf, "cur_etheraddr");
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, buf, sizeof(buf));
+	id = (struct ether_addr *) buf;
+	p += snprintf(p, MAX_WX_STRING, "Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+		id->octet[0], id->octet[1], id->octet[2],
+		id->octet[3], id->octet[4], id->octet[5]);
+	wrqu->data.length = p - extra + 1;
+
+	return error;
+}
+
+
+static int
+wl_iw_set_country(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	char country_code[WLC_CNTRY_BUF_SZ];
+	int error = 0;
+	char *p = extra;
+	int country_offset;
+	int country_code_size;
+	wl_country_t cspec = {{0}, 0, {0}};
+	char smbuf[WLC_IOCTL_SMLEN];
+
+	cspec.rev = -1;
+	memset(country_code, 0, sizeof(country_code));
+	memset(smbuf, 0, sizeof(smbuf));
+
+	country_offset = strcspn(extra, " ");
+	country_code_size = strlen(extra) - country_offset;
+
+	if (country_offset != 0) {
+		strncpy(country_code, extra + country_offset +1,
+			MIN(country_code_size, sizeof(country_code)));
+
+
+		memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+		memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+
+		get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+
+		if ((error = dev_iw_iovar_setbuf(dev, "country", &cspec, \
+			sizeof(cspec), smbuf, sizeof(smbuf))) >= 0) {
+			p += snprintf(p, MAX_WX_STRING, "OK");
+			WL_ERROR(("%s: set country for %s as %s rev %d is OK\n", \
+				__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+			dhd_bus_country_set(dev, &cspec);
+			goto exit;
+		}
+	}
+
+	WL_ERROR(("%s: set country for %s as %s rev %d failed\n", \
+			__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+#ifdef CUSTOMER_HW2
+static int
+wl_iw_set_power_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+	static int pm = PM_FAST;
+	int pm_local = PM_OFF;
+	char powermode_val = 0;
+
+	WL_TRACE_COEX(("%s: DHCP session cmd:%s\n", __FUNCTION__, extra));
+
+	strncpy((char *)&powermode_val, extra + strlen("POWERMODE") + 1, 1);
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+		dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm));
+		dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local));
+
+		/* Disable packet filtering if necessary */
+		net_os_set_packet_filter(dev, 0);
+
+		g_bt->dhcp_done = false;
+		WL_TRACE_COEX(("%s: DHCP start, pm:%d changed to pm:%d\n",
+			__FUNCTION__, pm, pm_local));
+
+	} else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) {
+
+		dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+
+		/* Enable packet filtering if was turned off */
+		net_os_set_packet_filter(dev, 1);
+
+		g_bt->dhcp_done = true;
+
+	} else {
+		WL_ERROR(("%s Unkwown yet power setting, ignored\n",
+			__FUNCTION__));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+
+	return error;
+}
+#endif
+
+
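+/* Poll BT coexistence parameter 27 up to 12 times at 5 ms intervals; a sample
+ * matching the (e)SCO packet pattern ((param27 & 0x6) == 2) counts as a hit,
+ * and more than two hits are taken to mean an SCO/eSCO link is active.
+ */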
+bool btcoex_is_sco_active(struct net_device *dev)
+{
+	int ioc_res = 0;
+	bool res = false;
+	int sco_id_cnt = 0;
+	int param27;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+
+		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+		WL_TRACE_COEX(("%s, sample[%d], btc params: 27:%x\n",
+			__FUNCTION__, i, param27));
+
+		if (ioc_res < 0) {
+			WL_ERROR(("%s ioc read btc params error\n", __FUNCTION__));
+			break;
+		}
+
+		if ((param27 & 0x6) == 2) {
+			sco_id_cnt++;
+		}
+
+		if (sco_id_cnt > 2) {
+			WL_TRACE_COEX(("%s, sco/esco detected, pkt id_cnt:%d  samples:%d\n",
+				__FUNCTION__, sco_id_cnt, i));
+			res = true;
+			break;
+		}
+
+		msleep(5);
+	}
+
+	return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+
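+/* Save btc_params registers 50, 51, 64, 65 and 71 and override them while an
+ * (e)SCO link must be trumped (trump_sco == true); on the matching call with
+ * trump_sco == false, write the saved values back.
+ */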
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+	static bool saved_status = false;
+
+	char buf_reg50va_dhcp_on[8] = { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+	char buf_reg51va_dhcp_on[8] = { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg64va_dhcp_on[8] = { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg65va_dhcp_on[8] = { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg71va_dhcp_on[8] = { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg50;
+	static uint32 saved_reg51;
+	static uint32 saved_reg64;
+	static uint32 saved_reg65;
+	static uint32 saved_reg71;
+
+	if (trump_sco) {
+
+		WL_TRACE_COEX(("Do new SCO/eSCO coex algo {save & override} \n"));
+
+		if  ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50,  &saved_reg50)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 51,  &saved_reg51)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 64,  &saved_reg64)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 65,  &saved_reg65)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 71,  &saved_reg71))) {
+
+			saved_status = TRUE;
+			WL_TRACE_COEX(("%s saved bt_params[50,51,64,65,71]:"
+				" 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				__FUNCTION__, saved_reg50, saved_reg51,
+				saved_reg64, saved_reg65, saved_reg71));
+
+		} else {
+			WL_ERROR((":%s: save btc_params failed\n",
+				__FUNCTION__));
+			saved_status = false;
+			return -1;
+		}
+
+		WL_TRACE_COEX(("override with [50,51,64,65,71]:"
+			" 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			*(u32 *)(buf_reg50va_dhcp_on+4),
+			*(u32 *)(buf_reg51va_dhcp_on+4),
+			*(u32 *)(buf_reg64va_dhcp_on+4),
+			*(u32 *)(buf_reg65va_dhcp_on+4),
+			*(u32 *)(buf_reg71va_dhcp_on+4)));
+
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg50va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg51va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg64va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg65va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg71va_dhcp_on[0], 8);
+
+		saved_status = true;
+
+	} else if (saved_status) {
+
+		WL_TRACE_COEX(("Do new SCO/eSCO coex algo {save & override} \n"));
+
+		regaddr = 50;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg50);
+		regaddr = 51;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg51);
+		regaddr = 64;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg64);
+		regaddr = 65;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg65);
+		regaddr = 71;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg71);
+
+		WL_TRACE_COEX(("restore bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			saved_reg50, saved_reg51, saved_reg64,
+			saved_reg65, saved_reg71));
+
+		saved_status = false;
+	} else {
+		WL_ERROR((":%s att to restore not saved BTCOEX params\n",
+			__FUNCTION__));
+		return -1;
+	}
+	return 0;
+}
+#endif
+
+static int
+wl_iw_get_power_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error;
+	char *p = extra;
+	int pm_local = PM_FAST;
+
+	error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local));
+	if (!error) {
+		WL_TRACE(("%s: Powermode = %d\n", __func__, pm_local));
+		if (pm_local == PM_OFF)
+			pm_local = 1; /* Active */
+		else
+			pm_local = 0; /* Auto */
+		p += snprintf(p, MAX_WX_STRING, "powermode = %d", pm_local);
+	}
+	else {
+		WL_TRACE(("%s: Error = %d\n", __func__, error));
+		p += snprintf(p, MAX_WX_STRING, "FAIL");
+	}
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
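+/* DHCP/BT coexistence handler: on DHCP start it saves btc_params 66, 41 and 68
+ * and, if an SCO/eSCO link is detected, overrides them and arms the BT-DHCP
+ * timer; on DHCP end it stops the timer and restores the saved registers.
+ */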
+static int
+wl_iw_set_btcoex_dhcp(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+#ifndef CUSTOMER_HW2
+	static int  pm = PM_FAST;
+	int  pm_local = PM_OFF;
+#endif
+	char powermode_val = 0;
+	char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+	char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+	char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg66;
+	static uint32 saved_reg41;
+	static uint32 saved_reg68;
+	static bool saved_status = FALSE;
+
+	char buf_flag7_default[8] =   { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+#ifdef CUSTOMER_HW2
+	strncpy((char *)&powermode_val, extra + strlen("BTCOEXMODE") + 1, 1);
+#else
+	strncpy((char *)&powermode_val, extra + strlen("POWERMODE") + 1, 1);
+#endif
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+		WL_TRACE_COEX(("%s: DHCP session start, cmd:%s\n", __FUNCTION__, extra));
+
+		if ((saved_status == FALSE) &&
+#ifndef CUSTOMER_HW2
+		   (!dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))) &&
+#endif
+		   (!dev_wlc_intvar_get_reg(dev, "btc_params", 66,  &saved_reg66)) &&
+		   (!dev_wlc_intvar_get_reg(dev, "btc_params", 41,  &saved_reg41)) &&
+		   (!dev_wlc_intvar_get_reg(dev, "btc_params", 68,  &saved_reg68))) {
+			WL_TRACE_COEX(("save regs {66,41,68} ->: 0x%x 0x%x 0x%x\n", \
+				saved_reg66, saved_reg41, saved_reg68));
+
+#ifndef CUSTOMER_HW2
+			dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local));
+#endif
+
+				if (btcoex_is_sco_active(dev)) {
+
+					dev_wlc_bufvar_set(dev, "btc_params", \
+						(char *)&buf_reg66va_dhcp_on[0], \
+						 sizeof(buf_reg66va_dhcp_on));
+
+					dev_wlc_bufvar_set(dev, "btc_params", \
+						(char *)&buf_reg41va_dhcp_on[0], \
+						 sizeof(buf_reg41va_dhcp_on));
+
+					dev_wlc_bufvar_set(dev, "btc_params", \
+						(char *)&buf_reg68va_dhcp_on[0], \
+						 sizeof(buf_reg68va_dhcp_on));
+					saved_status = TRUE;
+
+					g_bt->bt_state = BT_DHCP_START;
+					g_bt->timer_on = 1;
+					mod_timer(&g_bt->timer, g_bt->timer.expires);
+					WL_TRACE_COEX(("%s enable BT DHCP Timer\n", \
+					__FUNCTION__));
+			}
+		}
+		else if (saved_status == TRUE) {
+			WL_ERROR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+		}
+	}
+#ifdef CUSTOMER_HW2
+	else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+#else
+	else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) {
+#endif
+
+#ifndef CUSTOMER_HW2
+		dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+#endif
+
+		WL_TRACE_COEX(("%s disable BT DHCP Timer\n", __FUNCTION__));
+		if (g_bt->timer_on) {
+			g_bt->timer_on = 0;
+			del_timer_sync(&g_bt->timer);
+
+			if (g_bt->bt_state != BT_DHCP_IDLE) {
+				WL_TRACE_COEX(("%s bt->bt_state:%d\n",
+					__FUNCTION__, g_bt->bt_state));
+
+				up(&g_bt->bt_sem);
+			}
+		}
+
+		if (saved_status == TRUE) {
+			dev_wlc_bufvar_set(dev, "btc_flags", \
+				(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+			regaddr = 66;
+			dev_wlc_intvar_set_reg(dev, "btc_params", \
+				(char *)&regaddr, (char *)&saved_reg66);
+			regaddr = 41;
+			dev_wlc_intvar_set_reg(dev, "btc_params", \
+				(char *)&regaddr, (char *)&saved_reg41);
+			regaddr = 68;
+			dev_wlc_intvar_set_reg(dev, "btc_params", \
+				(char *)&regaddr, (char *)&saved_reg68);
+
+			WL_TRACE_COEX(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n", \
+					saved_reg66, saved_reg41, saved_reg68));
+		}
+		saved_status = FALSE;
+	}
+	else {
+		WL_ERROR(("%s Unkwown yet power setting, ignored\n",
+			__FUNCTION__));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+
+	return error;
+}
+
+static int
+wl_iw_set_suspend(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+	suspend_flag = *(extra + strlen(SETSUSPEND_CMD) + 1) - '0';
+
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+
+	ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+	if (ret_now != suspend_flag) {
+		if (!(ret = net_os_set_suspend(dev, ret_now)))
+			WL_ERROR(("%s: Suspend Flag %d -> %d\n", \
+					__FUNCTION__, ret_now, suspend_flag));
+		else
+			WL_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
+
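+/* Format an SSID as printable text: backslashes are escaped and non-printable
+ * bytes are emitted as \xNN.  Returns the length of the formatted string.
+ */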
+int
+wl_format_ssid(char* ssid_buf, uint8* ssid, int ssid_len)
+{
+	int i, c;
+	char *p = ssid_buf;
+
+	if (ssid_len > 32) ssid_len = 32;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (int)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += sprintf(p, "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+
+	return p - ssid_buf;
+}
+
+static int
+wl_iw_get_link_speed(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+	static int link_speed;
+
+	net_os_wake_lock(dev);
+	if (g_onoff == G_WLAN_SET_ON) {
+		error = dev_wlc_ioctl(dev, WLC_GET_RATE, &link_speed, sizeof(link_speed));
+		link_speed *= 500000;
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "LinkSpeed %d", link_speed/1000000);
+
+	wrqu->data.length = p - extra + 1;
+
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+static int
+wl_iw_get_dtim_skip(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	char iovbuf[32];
+
+	net_os_wake_lock(dev);
+	if (g_onoff == G_WLAN_SET_ON) {
+
+			memset(iovbuf, 0, sizeof(iovbuf));
+			strcpy(iovbuf, "bcn_li_dtim");
+
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_VAR,
+				&iovbuf, sizeof(iovbuf))) >= 0) {
+
+				p += snprintf(p, MAX_WX_STRING, "Dtim_skip %d", iovbuf[0]);
+				WL_TRACE(("%s: get dtim_skip = %d\n", __FUNCTION__, iovbuf[0]));
+				wrqu->data.length = p - extra + 1;
+			}
+			else
+				WL_ERROR(("%s: get dtim_skip failed code %d\n", \
+					__FUNCTION__, error));
+	}
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+static int
+wl_iw_set_dtim_skip(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	int bcn_li_dtim;
+	char iovbuf[32];
+
+	net_os_wake_lock(dev);
+	if (g_onoff == G_WLAN_SET_ON) {
+
+		bcn_li_dtim = htod32((uint)*(extra + strlen(DTIM_SKIP_SET_CMD) + 1) - '0');
+
+		if ((bcn_li_dtim >= 0) && (bcn_li_dtim <= 5)) {
+
+			memset(iovbuf, 0, sizeof(iovbuf));
+			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+				4, iovbuf, sizeof(iovbuf));
+
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_VAR,
+				&iovbuf, sizeof(iovbuf))) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+
+				net_os_set_dtim_skip(dev, bcn_li_dtim);
+
+				WL_TRACE(("%s: set dtim_skip %d OK\n", __FUNCTION__, \
+					bcn_li_dtim));
+				goto exit;
+			}
+			else  WL_ERROR(("%s: set dtim_skip %d failed code %d\n", \
+				__FUNCTION__, bcn_li_dtim, error));
+		}
+		else  WL_ERROR(("%s Incorrect dtim_skip setting %d, ignored\n", \
+			__FUNCTION__, bcn_li_dtim));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+static int
+wl_iw_get_band(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	static int band;
+
+	net_os_wake_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		error = dev_wlc_ioctl(dev, WLC_GET_BAND, &band, sizeof(band));
+
+		p += snprintf(p, MAX_WX_STRING, "Band %d", band);
+
+		wrqu->data.length = p - extra + 1;
+	}
+
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+static int
+wl_iw_set_band(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	uint band;
+
+	net_os_wake_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_ON) {
+
+		band = htod32((uint)*(extra + strlen(BAND_SET_CMD) + 1) - '0');
+
+		if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_BAND,
+				&band, sizeof(band))) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+				WL_TRACE(("%s: set band %d OK\n", __FUNCTION__, band));
+				goto exit;
+			}
+			else WL_ERROR(("%s: set band %d failed code %d\n", __FUNCTION__, \
+					band, error));
+		}
+		else WL_ERROR(("%s Incorrect band setting %d, ignored\n", __FUNCTION__, band));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+#ifdef PNO_SUPPORT
+
+static int
+wl_iw_set_pno_reset(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+
+	net_os_wake_lock(dev);
+	if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+		if ((error = dhd_dev_pno_reset(dev)) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+				WL_TRACE(("%s: set OK\n", __FUNCTION__));
+				goto exit;
+		}
+		else  WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+
+static int
+wl_iw_set_pno_enable(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	int pfn_enabled;
+
+	net_os_wake_lock(dev);
+	pfn_enabled = htod32((uint)*(extra + strlen(PNOENABLE_SET_CMD) + 1) - '0');
+
+	if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+		if ((error = dhd_dev_pno_enable(dev, pfn_enabled)) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+				WL_TRACE(("%s: set OK\n", __FUNCTION__));
+				goto exit;
+		}
+		else  WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+
+static int
+wl_iw_set_pno_set(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int res = -1;
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	int nssid = 0;
+	cmd_tlv_t *cmd_tlv_temp;
+	char *str_ptr;
+	int tlv_size_left;
+	int pno_time;
+	int pno_repeat;
+	int pno_freq_expo_max;
+
+#ifdef PNO_SET_DEBUG
+	int i;
+	char pno_in_example[] = {'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', \
+							'S', '1', '2', '0',
+							'S',
+							0x04,
+							'B', 'R', 'C', 'M',
+							'S',
+							0x04,
+							'G', 'O', 'O', 'G',
+							'T',
+							'1','E',
+							'R',
+							'2',
+							'M',
+							'2',
+							0x00
+							};
+#endif
+
+	net_os_wake_lock(dev);
+	WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	if (wrqu->data.length < (strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))) {
+		WL_ERROR(("%s aggument=%d  less %d\n", __FUNCTION__, \
+			wrqu->data.length, strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t)));
+		goto exit_proc;
+	}
+
+#ifdef PNO_SET_DEBUG
+	if (!(extra = kmalloc(sizeof(pno_in_example) +100, GFP_KERNEL))) {
+		res = -ENOMEM;
+		goto exit_proc;
+	}
+	memcpy(extra, pno_in_example, sizeof(pno_in_example));
+	wrqu->data.length = sizeof(pno_in_example);
+	for (i = 0; i < wrqu->data.length; i++)
+		printf("%02X ", extra[i]);
+	printf("\n");
+#endif
+
+	str_ptr = extra;
+#ifdef PNO_SET_DEBUG
+	str_ptr +=  strlen("PNOSETUP ");
+	tlv_size_left = wrqu->data.length - strlen("PNOSETUP ");
+#else
+	str_ptr +=  strlen(PNOSETUP_SET_CMD);
+	tlv_size_left = wrqu->data.length - strlen(PNOSETUP_SET_CMD);
+#endif
+
+	cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+	pno_repeat = pno_freq_expo_max = 0;
+
+	if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && \
+		(cmd_tlv_temp->version == PNO_TLV_VERSION) && \
+		(cmd_tlv_temp->subver == PNO_TLV_SUBVERSION))
+	{
+		str_ptr += sizeof(cmd_tlv_t);
+		tlv_size_left  -= sizeof(cmd_tlv_t);
+
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, \
+				MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+			WL_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+			goto exit_proc;
+		}
+		else {
+			if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+				WL_ERROR(("%s scan duration corrupted field size %d\n", \
+						__FUNCTION__, tlv_size_left));
+				goto exit_proc;
+			}
+			str_ptr++;
+			pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+			WL_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+			if (str_ptr[0] != 0) {
+				if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+					WL_ERROR(("%s pno repeat : corrupted field\n", \
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+				WL_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+				if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+					WL_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", \
+							__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+				WL_PNO(("%s: pno_freq_expo_max=%d\n", \
+							__FUNCTION__, pno_freq_expo_max));
+			}
+		}
+	}
+	else {
+		WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max);
+
+exit_proc:
+	net_os_wake_unlock(dev);
+	return res;
+}
+#endif
+
+static int
+wl_iw_get_rssi(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	static int rssi = 0;
+	static wlc_ssid_t ssid = {0};
+	int error = 0;
+	char *p = extra;
+	static char ssidbuf[SSID_FMT_BUF_LEN];
+	scb_val_t scb_val;
+
+	net_os_wake_lock(dev);
+
+	bzero(&scb_val, sizeof(scb_val_t));
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		error = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+		if (error) {
+			WL_ERROR(("%s: Fails %d\n", __FUNCTION__, error));
+		} else {
+			rssi = dtoh32(scb_val.val);
+
+			error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
+			if (!error) {
+				ssid.SSID_len = dtoh32(ssid.SSID_len);
+				wl_format_ssid(ssidbuf, ssid.SSID, dtoh32(ssid.SSID_len));
+			}
+		}
+	}
+
+	WL_ASSOC(("%s ssid_len:%d, rssi:%d\n", __FUNCTION__, ssid.SSID_len, rssi));
+
+	if (error || (ssid.SSID_len == 0)) {
+		p += snprintf(p, MAX_WX_STRING, "FAIL");
+	} else {
+		p += snprintf(p, MAX_WX_STRING, "%s rssi %d ", ssidbuf, rssi);
+	}
+	wrqu->data.length = p - extra + 1;
+
+	net_os_wake_unlock(dev);
+	return error;
+}
+
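+/* Send a driver-private string event to user space as an IWEVCUSTOM wireless
+ * event, and enable a wake-lock timeout so the event can be delivered before
+ * the system suspends.
+ */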
+int
+wl_iw_send_priv_event(
+	struct net_device *dev,
+	char *flag
+)
+{
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd;
+
+	cmd = IWEVCUSTOM;
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) >= sizeof(extra))
+		return -1;
+
+	strcpy(extra, flag);
+	wrqu.data.length = strlen(extra);
+	wireless_send_event(dev, cmd, &wrqu, extra);
+	net_os_wake_lock_timeout_enable(dev);
+	WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+	return 0;
+}
+
+
+int
+wl_control_wl_start(struct net_device *dev)
+{
+	int ret = 0;
+	wl_iw_t *iw;
+
+	WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	iw = *(wl_iw_t **)netdev_priv(dev);
+
+	if (!iw) {
+		WL_ERROR(("%s: wl is null\n", __FUNCTION__));
+		return -1;
+	}
+	dhd_os_start_lock(iw->pub);
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+
+#if defined(BCMLXSDMMC)
+		sdioh_start(NULL, 0);
+#endif
+
+		ret = dhd_dev_reset(dev, 0);
+
+		if (ret == BCME_OK) {
+#if defined(BCMLXSDMMC)
+			sdioh_start(NULL, 1);
+#endif
+			dhd_dev_init_ioctl(dev);
+			g_onoff = G_WLAN_SET_ON;
+		}
+	}
+	WL_TRACE(("Exited %s \n", __FUNCTION__));
+
+	dhd_os_start_unlock(iw->pub);
+	return ret;
+}
+
+
+static int
+wl_iw_control_wl_off(
+	struct net_device *dev,
+	struct iw_request_info *info
+)
+{
+	int ret = 0;
+	wl_iw_t *iw;
+
+	WL_TRACE(("Enter %s\n", __FUNCTION__));
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	iw = *(wl_iw_t **)netdev_priv(dev);
+	if (!iw) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+	dhd_os_start_lock(iw->pub);
+
+#ifdef SOFTAP
+	ap_cfg_running = FALSE;
+#endif
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		g_onoff = G_WLAN_SET_OFF;
+#if defined(WL_IW_USE_ISCAN)
+		g_iscan->iscan_state = ISCAN_STATE_IDLE;
+#endif
+
+		dhd_dev_reset(dev, 1);
+
+#if defined(WL_IW_USE_ISCAN)
+#if !defined(CSCAN)
+		wl_iw_free_ss_cache();
+		wl_iw_run_ss_cache_timer(0);
+
+		g_ss_cache_ctrl.m_link_down = 1;
+#endif
+		memset(g_scan, 0, G_SCAN_RESULTS);
+		g_scan_specified_ssid = 0;
+#if defined(CONFIG_FIRST_SCAN)
+		g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+		g_first_counter_scans = 0;
+#endif
+#endif
+
+#if defined(BCMLXSDMMC)
+		sdioh_stop(NULL);
+#endif
+
+		net_os_set_dtim_skip(dev, 0);
+
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+
+		wl_iw_send_priv_event(dev, "STOP");
+	}
+
+	dhd_os_start_unlock(iw->pub);
+
+	WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+	return ret;
+}
+
+static int
+wl_iw_control_wl_on(
+	struct net_device *dev,
+	struct iw_request_info *info
+)
+{
+	int ret = 0;
+
+	WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+	if ((ret = wl_control_wl_start(dev)) != BCME_OK) {
+		WL_ERROR(("%s failed first attemp\n", __FUNCTION__));
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+		if ((ret = wl_control_wl_start(dev)) != BCME_OK) {
+			WL_ERROR(("%s failed second attemp\n", __FUNCTION__));
+			net_os_send_hang_message(dev);
+			return ret;
+		}
+	}
+
+	wl_iw_send_priv_event(dev, "START");
+
+#ifdef SOFTAP
+	if (!ap_fw_loaded) {
+		wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+	}
+#else
+	wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+#endif
+
+	WL_TRACE(("Exited %s \n", __FUNCTION__));
+
+	return ret;
+}
+
+#ifdef SOFTAP
+static struct ap_profile my_ap;
+static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap);
+static int get_assoc_sta_list(struct net_device *dev, char *buf, int len);
+static int set_ap_mac_list(struct net_device *dev, void *buf);
+
+#define PTYPE_STRING	0
+#define PTYPE_INTDEC	1
+#define PTYPE_INTHEX	2
+#define PTYPE_STR_HEX	3
+int get_parmeter_from_string(
+	char **str_ptr, const char *token, int param_type, void  *dst, int param_max_len);
+
+#endif
+
+int hex2num(char c)
+{
+	if (c >= '0' && c <= '9')
+		return c - '0';
+	if (c >= 'a' && c <= 'f')
+		return c - 'a' + 10;
+	if (c >= 'A' && c <= 'F')
+		return c - 'A' + 10;
+	return -1;
+}
+
+int hex2byte(const char *hex)
+{
+	int a, b;
+	a = hex2num(*hex++);
+	if (a < 0)
+		return -1;
+	b = hex2num(*hex++);
+	if (b < 0)
+		return -1;
+	return (a << 4) | b;
+}
+
+
+
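+/* Convert a string of 2*len hex characters into len bytes; returns -1 if a
+ * non-hex character is encountered.
+ */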
+int hstr_2_buf(const char *txt, u8 *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		int a, b;
+
+		a = hex2num(*txt++);
+		if (a < 0)
+			return -1;
+		b = hex2num(*txt++);
+		if (b < 0)
+			return -1;
+		*buf++ = (a << 4) | b;
+	}
+
+	return 0;
+}
+
+#if defined(SOFTAP) && defined(SOFTAP_TLV_CFG)
+
+static int wl_iw_softap_cfg_tlv(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int res = -1;
+	char *str_ptr;
+	int tlv_size_left;
+
+
+#define SOFTAP_TLV_DEBUG  1
+#ifdef SOFTAP_TLV_DEBUG
+char softap_cmd_example[] = {
+
+	'S', 'O', 'F', 'T', 'A', 'P', 'S', 'E', 'T', ' ',
+
+	SOFTAP_TLV_PREFIX, SOFTAP_TLV_VERSION,
+	SOFTAP_TLV_SUBVERSION, SOFTAP_TLV_RESERVED,
+
+	TLV_TYPE_SSID,		9, 'B', 'R', 'C', 'M', ',', 'G', 'O', 'O', 'G',
+
+	TLV_TYPE_SECUR,		4, 'O', 'P', 'E', 'N',
+
+	TLV_TYPE_KEY,		4, 0x31, 0x32, 0x33, 0x34,
+
+	TLV_TYPE_CHANNEL,	4, 0x06, 0x00, 0x00, 0x00
+};
+#endif
+
+
+#ifdef SOFTAP_TLV_DEBUG
+	{
+	int i;
+	if (!(extra = kmalloc(sizeof(softap_cmd_example) +10, GFP_KERNEL)))
+		return -ENOMEM;
+	memcpy(extra, softap_cmd_example, sizeof(softap_cmd_example));
+	wrqu->data.length = sizeof(softap_cmd_example);
+	print_buf(extra, wrqu->data.length, 16);
+	for (i = 0; i < wrqu->data.length; i++)
+		printf("%c ", extra[i]);
+	printf("\n");
+	}
+#endif
+
+	WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		return -1;
+	}
+
+	if (wrqu->data.length < (strlen(SOFTAP_SET_CMD) + sizeof(cmd_tlv_t))) {
+		WL_ERROR(("%s argument=%d  less %d\n", __FUNCTION__,
+			wrqu->data.length, strlen(SOFTAP_SET_CMD) + sizeof(cmd_tlv_t)));
+		return -1;
+	}
+
+	str_ptr =  extra + strlen(SOFTAP_SET_CMD)+1; 
+	tlv_size_left = wrqu->data.length - (strlen(SOFTAP_SET_CMD)+1);
+
+	memset(&my_ap, 0, sizeof(my_ap));
+
+	return res;
+}
+#endif
+
+
+#ifdef SOFTAP
+int init_ap_profile_from_string(char *param_str, struct ap_profile *ap_cfg)
+{
+	char *str_ptr = param_str;
+	char sub_cmd[16];
+	int ret = 0;
+
+	memset(sub_cmd, 0, sizeof(sub_cmd));
+	memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+	if (get_parmeter_from_string(&str_ptr, "ASCII_CMD=",
+		PTYPE_STRING, sub_cmd, SSID_LEN) != 0) {
+		return -1;
+	}
+	if (strncmp(sub_cmd, "AP_CFG", 6)) {
+		WL_ERROR(("ERROR: sub_cmd:%s != 'AP_CFG'!\n", sub_cmd));
+		return -1;
+	}
+
+	ret = get_parmeter_from_string(&str_ptr, "SSID=", PTYPE_STRING, ap_cfg->ssid, SSID_LEN);
+
+	ret |= get_parmeter_from_string(&str_ptr, "SEC=", PTYPE_STRING,  ap_cfg->sec, SEC_LEN);
+
+	ret |= get_parmeter_from_string(&str_ptr, "KEY=", PTYPE_STRING,  ap_cfg->key, KEY_LEN);
+
+	ret |= get_parmeter_from_string(&str_ptr, "CHANNEL=", PTYPE_INTDEC, &ap_cfg->channel, 5);
+
+	get_parmeter_from_string(&str_ptr, "PREAMBLE=", PTYPE_INTDEC, &ap_cfg->preamble, 5);
+
+	get_parmeter_from_string(&str_ptr, "MAX_SCB=", PTYPE_INTDEC,  &ap_cfg->max_scb, 5);
+
+	get_parmeter_from_string(&str_ptr, "HIDDEN=", PTYPE_INTDEC, &ap_cfg->closednet, 5);
+
+	get_parmeter_from_string(&str_ptr, "COUNTRY=", PTYPE_STRING, &ap_cfg->country_code, 3);
+
+	return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+static int iwpriv_set_ap_config(struct net_device *dev,
+		struct iw_request_info *info,
+		union iwreq_data *wrqu,
+		char *ext)
+{
+	int res = 0;
+	char  *extra = NULL;
+	struct ap_profile *ap_cfg = &my_ap;
+
+	WL_TRACE(("%s: info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+		__FUNCTION__,
+		info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		char *str_ptr;
+
+		if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		extra[wrqu->data.length] = 0;
+		WL_SOFTAP((" Got str param in iw_point:\n %s\n", extra));
+
+		memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+		str_ptr = extra;
+
+		if ((res = init_ap_profile_from_string(extra, ap_cfg)) < 0) {
+			WL_ERROR(("%s failed to parse %d\n", __FUNCTION__, res));
+			kfree(extra);
+			return -1;
+		}
+
+	} else {
+		WL_ERROR(("IWPRIV argument len = 0 \n"));
+		return -1;
+	}
+
+	if ((res = set_ap_cfg(dev, ap_cfg)) < 0)
+		WL_ERROR(("%s failed to set_ap_cfg %d\n", __FUNCTION__, res));
+
+	kfree(extra);
+
+	return res;
+}
+#endif
+
+
+#ifdef SOFTAP
+static int iwpriv_get_assoc_list(struct net_device *dev,
+		struct iw_request_info *info,
+		union iwreq_data *p_iwrq,
+		char *extra)
+{
+	int i, ret = 0;
+	char mac_buf[256];
+	struct maclist *sta_maclist = (struct maclist *)mac_buf;
+
+	char mac_lst[384];
+	char *p_mac_str;
+	char *p_mac_str_end;
+
+	if ((!dev) || (!extra)) {
+		return -EINVAL;
+	}
+
+	net_os_wake_lock(dev);
+
+	WL_TRACE(("\n %s: IWPRIV IOCTL: cmd:%hx, flags:%hx, extra:%p, iwp.len:%d, \
+		iwp.ptr:%p, iwp.flags:%x  \n", __FUNCTION__, info->cmd, info->flags, \
+		extra, p_iwrq->data.length, p_iwrq->data.pointer, p_iwrq->data.flags));
+
+	memset(sta_maclist, 0, sizeof(mac_buf));
+
+	sta_maclist->count = 8;
+
+	WL_SOFTAP(("%s: net device:%s, buf_sz:%d\n",
+		__FUNCTION__, dev->name, sizeof(mac_buf)));
+
+	if ((ret = get_assoc_sta_list(dev, mac_buf, sizeof(mac_buf))) < 0) {
+		WL_ERROR(("%s: sta list ioctl error:%d\n",
+			__FUNCTION__, ret));
+		goto func_exit;
+	}
+
+	WL_SOFTAP(("%s: got %d stations\n", __FUNCTION__,
+		sta_maclist->count));
+
+	memset(mac_lst, 0, sizeof(mac_lst));
+	p_mac_str = mac_lst;
+	p_mac_str_end = &mac_lst[sizeof(mac_lst)-1];
+
+	for (i = 0; i < 8; i++) {
+		struct ether_addr *id = &sta_maclist->ea[i];
+		if (!ETHER_ISNULLADDR(id->octet)) {
+			scb_val_t scb_val;
+			int rssi = 0;
+
+			bzero(&scb_val, sizeof(scb_val_t));
+
+			if ((p_mac_str_end - p_mac_str) <= 36) {
+				WL_ERROR(("%s: mac list buf is < 36 for item[%i] item\n",
+					__FUNCTION__, i));
+				break;
+			}
+
+			p_mac_str += snprintf(p_mac_str, p_mac_str_end - p_mac_str,
+				"\nMac[%d]=%02X:%02X:%02X:%02X:%02X:%02X,", i,
+				id->octet[0], id->octet[1], id->octet[2],
+				id->octet[3], id->octet[4], id->octet[5]);
+
+			bcopy(id->octet, &scb_val.ea, 6);
+			ret = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+			if (ret  < 0) {
+				snprintf(p_mac_str, p_mac_str_end - p_mac_str, "RSSI:ERR");
+				WL_ERROR(("%s: RSSI ioctl error:%d\n",
+					__FUNCTION__, ret));
+				break;
+			}
+
+			rssi = dtoh32(scb_val.val);
+			p_mac_str += snprintf(p_mac_str, p_mac_str_end - p_mac_str,
+				"RSSI:%d", rssi);
+		}
+	}
+
+	p_iwrq->data.length = strlen(mac_lst) + 1;
+
+	WL_SOFTAP(("%s: data to user:\n%s\n usr_ptr:%p\n", __FUNCTION__,
+		mac_lst, p_iwrq->data.pointer));
+
+	if (p_iwrq->data.length) {
+		bcopy(mac_lst, extra, p_iwrq->data.length);
+	}
+
+func_exit:
+	net_os_wake_unlock(dev);
+
+	WL_TRACE(("Exited %s \n", __FUNCTION__));
+	return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
+#define MAC_FILT_MAX 8
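+/*
+ * Private WE handler: parses "MAC_MODE=", "MAC_CNT=" and a list of "MAC="
+ * tokens from user space and programs the resulting filter list via
+ * set_ap_mac_list(). At most MAC_FILT_MAX entries are accepted.
+ */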
+static int iwpriv_set_mac_filters(struct net_device *dev,
+		struct iw_request_info *info,
+		union iwreq_data *wrqu,
+		char *ext)
+{
+	int i, ret = -1;
+	char  * extra = NULL;
+	int mac_cnt = 0;
+	int mac_mode = 0;
+	struct ether_addr *p_ea;
+	struct mac_list_set mflist_set;
+
+	WL_SOFTAP((">>> Got IWPRIV SET_MAC_FILTER IOCTL:  info->cmd:%x, \
+			info->flags:%x, u.data:%p, u.len:%d\n",
+			info->cmd, info->flags,
+			wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		char *str_ptr;
+
+		if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		extra[wrqu->data.length] = 0;
+		WL_SOFTAP((" Got parameter string in iw_point:\n %s \n", extra));
+
+		memset(&mflist_set, 0, sizeof(mflist_set));
+
+		str_ptr = extra;
+
+		if (get_parmeter_from_string(&str_ptr, "MAC_MODE=",
+			PTYPE_INTDEC, &mac_mode, 4) != 0) {
+			WL_ERROR(("ERROR: 'MAC_MODE=' token is missing\n"));
+			goto exit_proc;
+		}
+
+		p_ea = &mflist_set.mac_list.ea[0];
+
+		if (get_parmeter_from_string(&str_ptr, "MAC_CNT=",
+			PTYPE_INTDEC, &mac_cnt, 4) != 0) {
+			WL_ERROR(("ERROR: 'MAC_CNT=' token param is missing \n"));
+			goto exit_proc;
+		}
+
+		if (mac_cnt > MAC_FILT_MAX) {
+			WL_ERROR(("ERROR: number of MAC filters > MAX\n"));
+			goto exit_proc;
+		}
+
+		for (i=0; i < mac_cnt; i++)
+			if (get_parmeter_from_string(&str_ptr, "MAC=",
+				PTYPE_STR_HEX, &p_ea[i], 12) != 0) {
+				WL_ERROR(("ERROR: MAC_filter[%d] is missing !\n", i));
+				goto exit_proc;
+			}
+
+		WL_SOFTAP(("MAC_MODE=:%d, MAC_CNT=%d, MACs:..\n", mac_mode, mac_cnt));
+		for (i = 0; i < mac_cnt; i++) {
+		   WL_SOFTAP(("mac_filt[%d]:", i));
+		   print_buf(&p_ea[i], 6, 0);
+		}
+
+		mflist_set.mode = mac_mode;
+		mflist_set.mac_list.count = mac_cnt;
+		set_ap_mac_list(dev, &mflist_set);
+
+		wrqu->data.pointer = NULL;
+		wrqu->data.length = 0;
+		ret = 0;
+
+	} else {
+		WL_ERROR(("IWPRIV argument len is 0\n"));
+		return -1;
+	}
+
+	exit_proc:
+	kfree(extra);
+	return ret;
+}
+#endif
+
+
+#ifdef SOFTAP
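+/*
+ * Private WE handler: extracts a "MAC=" token from the user buffer and
+ * disassociates that station via wl_iw_softap_deassoc_stations().
+ */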
+static int iwpriv_set_ap_sta_disassoc(struct net_device *dev,
+        struct iw_request_info *info,
+        union iwreq_data *wrqu,
+        char *ext)
+{
+	int res = 0;
+	char sta_mac[6] = {0, 0, 0, 0, 0, 0};
+	char cmd_buf[256];
+	char *str_ptr = cmd_buf;
+
+	WL_SOFTAP((">>%s called\n args: info->cmd:%x,"
+		" info->flags:%x, u.data.p:%p, u.data.len:%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		if (wrqu->data.length >= sizeof(cmd_buf))
+			return -EINVAL;
+
+		if (copy_from_user(cmd_buf, wrqu->data.pointer, wrqu->data.length)) {
+			return -EFAULT;
+		}
+		cmd_buf[wrqu->data.length] = 0;
+
+		if (get_parmeter_from_string(&str_ptr,
+			"MAC=", PTYPE_STR_HEX, sta_mac, 12) == 0) {
+			res = wl_iw_softap_deassoc_stations(dev, sta_mac);
+		} else  {
+			WL_ERROR(("ERROR: MAC= token not found\n"));
+		}
+	}
+
+	return res;
+}
+#endif
+
+#endif
+
+
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+	__u16		cmd;		
+	__u16		flags;		
+};
+
+typedef int (*iw_handler)(struct net_device *dev,
+		struct iw_request_info *info,
+		void *wrqu,
+		char *extra);
+#endif 
+
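+/*
+ * SIOCSIWCOMMIT handler: if an SSID is currently configured, re-trigger
+ * association by issuing WLC_REASSOC with a zeroed BSSID.
+ */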
+static int
+wl_iw_config_commit(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *zwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct sockaddr bssid;
+
+	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+		return error;
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	if (!ssid.SSID_len)
+		return 0;
+
+	bzero(&bssid, sizeof(struct sockaddr));
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __FUNCTION__, ssid.SSID));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_name(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	char *cwrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+	strcpy(cwrq, "IEEE 802.11-DS");
+
+	return 0;
+}
+
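+/*
+ * SIOCSIWFREQ handler: accepts either a channel number or a frequency
+ * (mantissa/exponent pair), converts a frequency to a channel with
+ * wf_mhz2channel() and applies it via WLC_SET_CHANNEL. Ignored while
+ * SoftAP is active.
+ */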
+static int
+wl_iw_set_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error, chan;
+	uint sf = 0;
+
+	WL_TRACE(("%s %s: SIOCSIWFREQ\n", __FUNCTION__, dev->name));
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("%s:>> not executed, 'SOFT_AP is active' \n", __FUNCTION__));
+		return 0;
+	}
+#endif
+
+	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+		chan = fwrq->m;
+	} else {
+		if (fwrq->e >= 6) {
+			fwrq->e -= 6;
+			while (fwrq->e--)
+				fwrq->m *= 10;
+		} else if (fwrq->e < 6) {
+			while (fwrq->e++ < 6)
+				fwrq->m /= 10;
+		}
+
+		if (fwrq->m > 4000 && fwrq->m < 5000)
+			sf = WF_CHAN_FACTOR_4_G;
+
+		chan = wf_mhz2channel(fwrq->m, sf);
+	}
+	chan = htod32(chan);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+		return error;
+
+	g_wl_iw_params.target_channel = chan;
+	
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+
+	fwrq->m = dtoh32(ci.hw_channel);
+	fwrq->e = dtoh32(0);
+	return 0;
+}
+
+static int
+wl_iw_set_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int infra = 0, ap = 0, error = 0;
+
+	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+	switch (*uwrq) {
+	case IW_MODE_MASTER:
+		infra = ap = 1;
+		break;
+	case IW_MODE_ADHOC:
+	case IW_MODE_AUTO:
+		break;
+	case IW_MODE_INFRA:
+		infra = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	infra = htod32(infra);
+	ap = htod32(ap);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+		return error;
+
+	
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int error, infra = 0, ap = 0;
+
+	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+		return error;
+
+	infra = dtoh32(infra);
+	ap = dtoh32(ap);
+	*uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+	return 0;
+}
+
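+/*
+ * SIOCGIWRANGE handler: reports the valid channels, bit rates (including
+ * 802.11n rates when nmode is enabled), signal-quality limits, encryption
+ * key sizes and WE event capabilities supported by the driver.
+ */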
+static int
+wl_iw_get_range(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	struct iw_range *range = (struct iw_range *) extra;
+	wl_uint32_list_t *list;
+	wl_rateset_t rateset;
+	int8 *channels;
+	int error, i, k;
+	uint sf, ch;
+
+	int phytype;
+	int bw_cap = 0, sgi_tx = 0, nmode = 0;
+	channel_info_t ci;
+	uint8 nrate_list2copy = 0;
+	uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+		{14, 29, 43, 58, 87, 116, 130, 144},
+		{27, 54, 81, 108, 162, 216, 243, 270},
+		{30, 60, 90, 120, 180, 240, 270, 300}};
+
+	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	channels = kmalloc((MAXCHANNEL+1)*4, GFP_KERNEL);
+	if (!channels) {
+		WL_ERROR(("Could not alloc channels\n"));
+		return -ENOMEM;
+	}
+	list = (wl_uint32_list_t *)channels;
+
+	dwrq->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(struct iw_range));
+
+	range->min_nwid = range->max_nwid = 0;
+
+	list->count = htod32(MAXCHANNEL);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, (MAXCHANNEL+1)*4))) {
+		kfree(channels);
+		return error;
+	}
+	for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+		range->freq[i].i = dtoh32(list->element[i]);
+
+		ch = dtoh32(list->element[i]);
+		if (ch <= CH_MAX_2G_CHANNEL)
+			sf = WF_CHAN_FACTOR_2_4_G;
+		else
+			sf = WF_CHAN_FACTOR_5_G;
+
+		range->freq[i].m = wf_channel2mhz(ch, sf);
+		range->freq[i].e = 6;
+	}
+	range->num_frequency = range->num_channels = i;
+
+	range->max_qual.qual = 5;
+	
+	range->max_qual.level = 0x100 - 200;	
+	
+	range->max_qual.noise = 0x100 - 200;	
+	
+	range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+	
+	range->avg_qual.qual = 3;
+	
+	range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+	
+	range->avg_qual.noise = 0x100 - 75;	
+#endif 
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) {
+		kfree(channels);
+		return error;
+	}
+	rateset.count = dtoh32(rateset.count);
+	range->num_bitrates = rateset.count;
+	for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+		range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000;
+	dev_wlc_intvar_get(dev, "nmode", &nmode);
+	dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype));
+
+	if (nmode == 1 && phytype == WLC_PHY_TYPE_SSN) {
+		dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap);
+		dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx);
+		dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t));
+		ci.hw_channel = dtoh32(ci.hw_channel);
+
+		if (bw_cap == 0 ||
+			(bw_cap == 2 && ci.hw_channel <= 14)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 0;
+			else
+				nrate_list2copy = 1;
+		}
+		if (bw_cap == 1 ||
+			(bw_cap == 2 && ci.hw_channel >= 36)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 2;
+			else
+				nrate_list2copy = 3;
+		}
+		range->num_bitrates += 8;
+		for (k = 0; i < range->num_bitrates; k++, i++) {
+			
+			range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+		}
+	}
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) {
+		kfree(channels);
+		return error;
+	}
+	i = dtoh32(i);
+	if (i == WLC_PHY_TYPE_A)
+		range->throughput = 24000000;	
+	else
+		range->throughput = 1500000;	
+
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+	range->num_encoding_sizes = 4;
+	range->encoding_size[0] = WEP1_KEY_SIZE;
+	range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+	range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+	range->encoding_size[2] = 0;
+#endif
+	range->encoding_size[3] = AES_KEY_SIZE;
+
+	range->min_pmp = 0;
+	range->max_pmp = 0;
+	range->min_pmt = 0;
+	range->max_pmt = 0;
+	range->pmp_flags = 0;
+	range->pm_capa = 0;
+
+	range->num_txpower = 2;
+	range->txpower[0] = 1;
+	range->txpower[1] = 255;
+	range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 19;
+
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->r_time_flags = 0;
+	
+	range->min_retry = 1;
+	range->max_retry = 255;
+	
+	range->min_r_time = 0;
+	range->max_r_time = 0;
+#endif 
+
+#if WIRELESS_EXT > 17
+	range->enc_capa = IW_ENC_CAPA_WPA;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+#ifdef BCMWPA2
+	range->enc_capa |= IW_ENC_CAPA_WPA2;
+#endif
+
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+#ifdef BCMWPA2
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+#endif
+#endif 
+
+	kfree(channels);
+
+	return 0;
+}
+
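+/* Map an RSSI value (dBm) onto the 0..5 quality scale reported to WE. */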
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		return 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		return 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		return 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		return 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		return 4;
+	else
+		return 5;
+}
+
+static int
+wl_iw_set_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	int i;
+
+	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+	for (i = 0; i < iw->spy_num; i++)
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+	return 0;
+}
+
+static int
+wl_iw_get_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = iw->spy_num;
+	for (i = 0; i < iw->spy_num; i++) {
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		addr[i].sa_family = AF_UNIX;
+		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+		iw->spy_qual[i].updated = 0;
+	}
+
+	return 0;
+}
+
+
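+/*
+ * If a target channel is set, add a single-entry chanspec list (20 MHz,
+ * band derived from the channel number) to the join parameters and grow
+ * *join_params_size accordingly.
+ */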
+static int
+wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params, int *join_params_size)
+{
+	chanspec_t chanspec = 0;
+
+	if (ch != 0) {
+
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		join_params->params.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+		        htodchanspec(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num = htod32(join_params->params.chanspec_num);
+
+		WL_TRACE(("%s  join_params->params.chanspec_list[0]= %X\n", \
+			__FUNCTION__, join_params->params.chanspec_list[0]));
+	}
+	return 1;
+}
+
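+/*
+ * SIOCSIWAP handler: a broadcast or null BSSID triggers WLC_DISASSOC;
+ * otherwise the cached SSID (g_ssid), the requested BSSID and the target
+ * channel are submitted through WLC_SET_SSID to start the join.
+ */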
+static int
+wl_iw_set_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	int error = -EINVAL;
+	wl_join_params_t join_params;
+	int join_params_size;
+
+	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+	if (awrq->sa_family != ARPHRD_ETHER) {
+		WL_ERROR(("Invalid Header...sa_family\n"));
+		return -EINVAL;
+	}
+
+	
+	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+		scb_val_t scbval;
+		
+		bzero(&scbval, sizeof(scb_val_t));
+		
+		(void) dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+		return 0;
+	}
+
+
+	
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
+	memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+
+	WL_ASSOC(("%s  target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel));
+	wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) {
+		WL_ERROR(("%s Invalid ioctl data=%d\n", __FUNCTION__, error));
+		return error;
+	}
+
+	if (g_ssid.SSID_len) {
+		WL_ASSOC(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__,  \
+			g_ssid.SSID, MAC2STR((u8 *)awrq->sa_data), \
+			g_wl_iw_params.target_channel));
+	}
+
+	
+	memset(&g_ssid, 0, sizeof(g_ssid));
+	return 0;
+}
+
+static int
+wl_iw_get_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+	awrq->sa_family = ARPHRD_ETHER;
+	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+	
+	(void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	struct iw_mlme *mlme;
+	scb_val_t scbval;
+	int error  = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name));
+
+	mlme = (struct iw_mlme *)extra;
+	if (mlme == NULL) {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	scbval.val = mlme->reason_code;
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+	if (mlme->cmd == IW_MLME_DISASSOC) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+	}
+	else if (mlme->cmd == IW_MLME_DEAUTH) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+			sizeof(scb_val_t));
+	}
+	else {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	return error;
+}
+#endif 
+
+#ifndef WL_IW_USE_ISCAN
+static int
+wl_iw_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int error, i;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+	if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("%s: list->version %d != WL_BSS_INFO_VERSION\n", \
+			 __FUNCTION__, list->version));
+		kfree(list);
+		return -EINVAL;
+	}
+
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+		if ((dtoh32(bi->length) > buflen) ||
+		    (((uintptr)bi + dtoh32(bi->length)) > ((uintptr)list + buflen))) {
+			WL_ERROR(("%s: Scan results out of bounds: %u\n",__FUNCTION__,dtoh32(bi->length)));
+			kfree(list);
+			return -E2BIG;
+		}
+
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif 
+
+		dwrq->length++;
+	}
+
+	kfree(list);
+
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		
+		dwrq->flags = 1;
+	}
+	return 0;
+}
+#endif
+
+#ifdef WL_IW_USE_ISCAN
+static int
+wl_iw_iscan_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	iscan_buf_t * buf;
+	iscan_info_t *iscan = g_iscan;
+
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		WL_ERROR(("%s error\n", __FUNCTION__));
+		return 0;
+	}
+
+	buf = iscan->list_hdr;
+	
+	while (buf) {
+		list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+		if (list->version != WL_BSS_INFO_VERSION) {
+			WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \
+				__FUNCTION__, list->version));
+			return -EINVAL;
+		}
+
+		bi = NULL;
+		for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+			bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+			          : list->bss_info;
+
+			if ((dtoh32(bi->length) > WLC_IW_ISCAN_MAXLEN) ||
+			    (((uintptr)bi + dtoh32(bi->length)) > ((uintptr)list + WLC_IW_ISCAN_MAXLEN))) {
+				WL_ERROR(("%s: Scan results out of bounds: %u\n",__FUNCTION__,dtoh32(bi->length)));
+				return -E2BIG;
+			}
+
+			if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+				continue;
+
+			memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+			addr[dwrq->length].sa_family = ARPHRD_ETHER;
+			qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+			qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+			qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+#if WIRELESS_EXT > 18
+			qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+			qual[dwrq->length].updated = 7;
+#endif 
+
+			dwrq->length++;
+		}
+		buf = buf->next;
+	}
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		
+		dwrq->flags = 1;
+	}
+	return 0;
+}
+
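+/* Fill a wl_scan_params_t with broadcast-scan defaults and an optional SSID. */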
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+#if defined(CONFIG_FIRST_SCAN)
+	if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+		params->passive_time = 30;
+#endif
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+	return err;
+}
+
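+/* Send an incremental-scan ("iscan") request with the given action to the device. */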
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+	int err = 0;
+
+	iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+	iscan->iscan_ex_params_p->action = htod16(action);
+	iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+	WL_SCAN(("%s : nprobes=%d\n", __FUNCTION__, iscan->iscan_ex_params_p->params.nprobes));
+	WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+	WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+	WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+	WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+	WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type));
+
+	if ((err = dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p, \
+		iscan->iscan_ex_param_size, iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+			WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+			err = -1;
+	}
+
+	return err;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+	iscan_info_t *iscan = (iscan_info_t *)data;
+	if (iscan) {
+		iscan->timer_on = 0;
+		if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+			WL_SCAN(("timer trigger\n"));
+			up(&iscan->sysioc_sem);
+		}
+	}
+}
+static void wl_iw_set_event_mask(struct net_device *dev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	
+
+	dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+		iovbuf, sizeof(iovbuf));
+}
+
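+/*
+ * Fetch the next chunk of incremental scan results into a (possibly newly
+ * allocated) iscan buffer on the result list and return the scan status.
+ */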
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+	iscan_buf_t * buf;
+	iscan_buf_t * ptr;
+	wl_iscan_results_t * list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	uint32 status;
+	int res;
+
+	mutex_lock(&wl_cache_lock);
+	if (iscan->list_cur) {
+		buf = iscan->list_cur;
+		iscan->list_cur = buf->next;
+	}
+	else {
+		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+		if (!buf) {
+			WL_ERROR(("%s can't alloc iscan_buf_t : going to abort current iscan\n", \
+						__FUNCTION__));
+			mutex_unlock(&wl_cache_lock);
+			return WL_SCAN_RESULTS_NO_MEM;
+		}
+		buf->next = NULL;
+		if (!iscan->list_hdr)
+			iscan->list_hdr = buf;
+		else {
+			ptr = iscan->list_hdr;
+			while (ptr->next) {
+				ptr = ptr->next;
+			}
+			ptr->next = buf;
+		}
+	}
+	memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	res = dev_iw_iovar_getbuf(
+		iscan->dev,
+		"iscanresults",
+		&list,
+		WL_ISCAN_RESULTS_FIXED_SIZE,
+		buf->iscan_buf,
+		WLC_IW_ISCAN_MAXLEN);
+	if (res == 0) {
+		results->buflen = dtoh32(results->buflen);
+		results->version = dtoh32(results->version);
+		results->count = dtoh32(results->count);
+		WL_SCAN(("results->count = %d\n", results->count));
+
+		WL_SCAN(("results->buflen = %d\n", results->buflen));
+		status = dtoh32(list_buf->status);
+	} else {
+		WL_ERROR(("%s returns error %d\n", __FUNCTION__, res));
+		status = WL_SCAN_RESULTS_NO_MEM;
+	}
+	mutex_unlock(&wl_cache_lock);
+	return status;
+}
+
+static void wl_iw_force_specific_scan(iscan_info_t *iscan)
+{
+	WL_SCAN(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_lock();
+#endif
+	(void) dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_unlock();
+#endif
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+#ifndef SANDGATE2G
+	union iwreq_data wrqu;
+
+	memset(&wrqu, 0, sizeof(wrqu));
+
+	wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+#if defined(CONFIG_FIRST_SCAN)
+	if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+		g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_READY;
+#endif
+	WL_SCAN(("Send Event ISCAN complete\n"));
+#endif 
+}
+
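+/*
+ * Kernel thread driving the incremental scan state machine: it collects
+ * partial results, re-arms the poll timer while the scan is in progress
+ * and sends the SIOCGIWSCAN "scan complete" event when results are ready.
+ */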
+static int
+_iscan_sysioc_thread(void *data)
+{
+	uint32 status;
+	iscan_info_t *iscan = (iscan_info_t *)data;
+	static bool iscan_pass_abort = FALSE;
+
+	DAEMONIZE("iscan_sysioc");
+
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (down_interruptible(&iscan->sysioc_sem) == 0) {
+
+		net_os_wake_lock(iscan->dev);
+
+#if defined(SOFTAP)
+		if (ap_cfg_running) {
+			WL_SCAN(("%s skipping SCAN ops in AP mode !!!\n", __FUNCTION__));
+			net_os_wake_unlock(iscan->dev);
+			continue;
+		}
+#endif
+
+		if (iscan->timer_on) {
+			iscan->timer_on = 0;
+			del_timer_sync(&iscan->timer);
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_lock();
+#endif
+		status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_unlock();
+#endif
+
+		if (g_scan_specified_ssid && (iscan_pass_abort == TRUE)) {
+			WL_SCAN(("%s Get results from specific scan status=%d\n", __FUNCTION__, status));
+			wl_iw_send_scan_complete(iscan);
+			iscan_pass_abort = FALSE;
+			status  = -1;
+		}
+
+		switch (status) {
+			case WL_SCAN_RESULTS_PARTIAL:
+				WL_SCAN(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_lock();
+#endif
+
+				wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_unlock();
+#endif
+
+				mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_SUCCESS:
+				WL_SCAN(("iscanresults complete\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			case WL_SCAN_RESULTS_PENDING:
+				WL_SCAN(("iscanresults pending\n"));
+
+				mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_ABORTED:
+				WL_SCAN(("iscanresults aborted\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				if (g_scan_specified_ssid == 0)
+					wl_iw_send_scan_complete(iscan);
+				else {
+					iscan_pass_abort = TRUE;
+					wl_iw_force_specific_scan(iscan);
+				}
+				break;
+			case WL_SCAN_RESULTS_NO_MEM:
+				WL_SCAN(("iscanresults can't alloc memory: skip\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				break;
+			default:
+				WL_SCAN(("iscanresults returned unknown status %d\n", status));
+				break;
+		}
+
+		net_os_wake_unlock(iscan->dev);
+	}
+
+	if (iscan->timer_on) {
+		iscan->timer_on = 0;
+		del_timer_sync(&iscan->timer);
+	}
+
+	complete_and_exit(&iscan->sysioc_exited, 0);
+}
+#endif 
+
+#if !defined(CSCAN)
+
+static void
+wl_iw_set_ss_cache_timer_flag(void)
+{
+	g_ss_cache_ctrl.m_timer_expired = 1;
+	WL_TRACE(("%s called\n", __FUNCTION__));
+}
+
+static int
+wl_iw_init_ss_cache_ctrl(void)
+{
+	WL_TRACE(("%s :\n", __FUNCTION__));
+	g_ss_cache_ctrl.m_prev_scan_mode = 0;
+	g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+	g_ss_cache_ctrl.m_cache_head = NULL;
+	g_ss_cache_ctrl.m_link_down = 0;
+	g_ss_cache_ctrl.m_timer_expired = 0;
+	memset(g_ss_cache_ctrl.m_active_bssid, 0, ETHER_ADDR_LEN);
+
+	g_ss_cache_ctrl.m_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
+	if (!g_ss_cache_ctrl.m_timer) {
+		return -ENOMEM;
+	}
+	g_ss_cache_ctrl.m_timer->function = (void *)wl_iw_set_ss_cache_timer_flag;
+	init_timer(g_ss_cache_ctrl.m_timer);
+
+	return 0;
+}
+
+
+
+static void
+wl_iw_free_ss_cache(void)
+{
+	wl_iw_ss_cache_t *node, *cur;
+	wl_iw_ss_cache_t **spec_scan_head;
+
+	WL_TRACE(("%s called\n", __FUNCTION__));
+
+	mutex_lock(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+	node = *spec_scan_head;
+
+	for (;node;) {
+		WL_TRACE(("%s : SSID - %s\n", __FUNCTION__, node->bss_info->SSID));
+		cur = node;
+		node = cur->next;
+		kfree(cur);
+	}
+	*spec_scan_head = NULL;
+	mutex_unlock(&wl_cache_lock);
+}
+
+
+
+static int
+wl_iw_run_ss_cache_timer(int kick_off)
+{
+	struct timer_list **timer;
+
+	timer = &g_ss_cache_ctrl.m_timer;
+
+	if (*timer) {
+		if (kick_off) {
+			(*timer)->expires = jiffies + 30000 * HZ / 1000;	
+			add_timer(*timer);
+			WL_TRACE(("%s : timer starts \n", __FUNCTION__));
+		} else {
+			del_timer_sync(*timer);
+			WL_TRACE(("%s : timer stops \n", __FUNCTION__));
+		}
+	}
+
+	return 0;
+}
+
+
+void
+wl_iw_release_ss_cache_ctrl(void)
+{
+	WL_TRACE(("%s :\n", __FUNCTION__));
+	wl_iw_free_ss_cache();
+	wl_iw_run_ss_cache_timer(0);
+	if (g_ss_cache_ctrl.m_timer) {
+		kfree(g_ss_cache_ctrl.m_timer);
+	}
+}
+
+
+
+static void
+wl_iw_reset_ss_cache(void)
+{
+	wl_iw_ss_cache_t *node, *prev, *cur;
+	wl_iw_ss_cache_t **spec_scan_head;
+
+	mutex_lock(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+	node = *spec_scan_head;
+	prev = node;
+
+	for (;node;) {
+		WL_TRACE(("%s : node SSID %s \n", __FUNCTION__, node->bss_info->SSID));
+		if (!node->dirty) {
+			cur = node;
+			if (cur == *spec_scan_head) {
+				*spec_scan_head = cur->next;
+				prev = *spec_scan_head;
+			}
+			else {
+				prev->next = cur->next;
+			}
+			node = cur->next;
+
+			WL_TRACE(("%s : Del node : SSID %s\n", __FUNCTION__, cur->bss_info->SSID));
+			kfree(cur);
+			continue;
+		}
+
+		node->dirty = 0;
+		prev = node;
+		node = node->next;
+	}
+	mutex_unlock(&wl_cache_lock);
+}
+
+
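+/*
+ * Merge the BSS entries of a specific-scan result list into the cached
+ * specific-scan list, marking existing entries dirty and appending new ones.
+ */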
+static int
+wl_iw_add_bss_to_ss_cache(wl_scan_results_t *ss_list)
+{
+
+	wl_iw_ss_cache_t *node, *prev, *leaf;
+	wl_iw_ss_cache_t **spec_scan_head;
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	if (!ss_list->count) {
+		return 0;
+	}
+
+	mutex_lock(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+
+	for (i = 0; i < ss_list->count; i++) {
+
+		node = *spec_scan_head;
+		prev = node;
+
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+
+		WL_TRACE(("%s : find %d with specific SSID %s\n", __FUNCTION__, i, bi->SSID));
+		for (;node;) {
+			if (!memcmp(&node->bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+				
+				WL_TRACE(("dirty marked : SSID %s\n", bi->SSID));
+				node->dirty = 1;
+				break;
+			}
+			prev = node;
+			node = node->next;
+		}
+
+		if (node) {
+			continue;
+		}
+		leaf = kmalloc(bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL);
+		if (!leaf) {
+			WL_ERROR(("Memory alloc failure %d\n", \
+				bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN));
+			mutex_unlock(&wl_cache_lock);
+			return -ENOMEM;
+		}
+
+		memcpy(leaf->bss_info, bi, bi->length);
+		leaf->next = NULL;
+		leaf->dirty = 1;
+		leaf->count = 1;
+		leaf->version = ss_list->version;
+
+		if (!prev) {
+			*spec_scan_head = leaf;
+		}
+		else {
+			prev->next = leaf;
+		}
+	}
+	mutex_unlock(&wl_cache_lock);
+	return 0;
+}
+
+
+static int
+wl_iw_merge_scan_cache(struct iw_request_info *info, char *extra, uint buflen_from_user,
+__u16 *merged_len)
+{
+	wl_iw_ss_cache_t *node;
+	wl_scan_results_t *list_merge;
+
+	mutex_lock(&wl_cache_lock);
+	node = g_ss_cache_ctrl.m_cache_head;
+	for (;node;) {
+		list_merge = (wl_scan_results_t *)&node->buflen;
+		WL_TRACE(("%s: Cached Specific APs list=%d\n", __FUNCTION__, list_merge->count));
+		if (buflen_from_user - *merged_len > 0) {
+			*merged_len += (__u16) wl_iw_get_scan_prep(list_merge, info,
+				extra + *merged_len, buflen_from_user - *merged_len);
+		}
+		else {
+			WL_TRACE(("%s: exit with break\n", __FUNCTION__));
+			break;
+		}
+		node = node->next;
+	}
+	mutex_unlock(&wl_cache_lock);
+	return 0;
+}
+
+
+static int
+wl_iw_delete_bss_from_ss_cache(void *addr)
+{
+
+	wl_iw_ss_cache_t *node, *prev;
+	wl_iw_ss_cache_t **spec_scan_head;
+
+	mutex_lock(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+	node = *spec_scan_head;
+	prev = node;
+	for (;node;) {
+		if (!memcmp(&node->bss_info->BSSID, addr, ETHER_ADDR_LEN)) {
+			if (node == *spec_scan_head) {
+				*spec_scan_head = node->next;
+			}
+			else {
+				prev->next = node->next;
+			}
+
+			WL_TRACE(("%s : Del node : %s\n", __FUNCTION__, node->bss_info->SSID));
+			kfree(node);
+			break;
+		}
+
+		prev = node;
+		node = node->next;
+	}
+
+	memset(addr, 0, ETHER_ADDR_LEN);
+	mutex_unlock(&wl_cache_lock);
+	return 0;
+}
+
+#endif
+
+
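+/*
+ * SIOCSIWSCAN handler (non-iscan path): optionally latches a specific SSID
+ * from an iw_scan_req and starts a scan with WLC_SCAN. Not used when CSCAN
+ * is enabled, and skipped while SoftAP is active or the driver is off.
+ */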
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error;
+	WL_TRACE(("%s dev:%s: SIOCSIWSCAN : SCAN\n", __FUNCTION__, dev->name));
+
+#if defined(CSCAN)
+	WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+	return -EINVAL;
+#endif 
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		return 0;
+	}
+#endif
+
+	if (g_onoff == G_WLAN_SET_OFF)
+		return 0;
+
+	memset(&g_specific_ssid, 0, sizeof(g_specific_ssid));
+#ifndef WL_IW_USE_ISCAN
+	g_scan_specified_ssid = 0;
+#endif 
+
+#if WIRELESS_EXT > 17
+	
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+#if defined(CONFIG_FIRST_SCAN)
+			if (g_first_broadcast_scan != BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+				WL_ERROR(("%s Ignoring SC %s first BC is not done = %d\n", \
+						__FUNCTION__, req->essid, \
+						g_first_broadcast_scan));
+				return -EBUSY;
+			}
+#endif
+			if (g_scan_specified_ssid) {
+				WL_SCAN(("%s Specific SCAN is not done ignore scan for = %s \n", \
+					__FUNCTION__, req->essid));
+				return -EBUSY;
+			}
+			else {
+				g_specific_ssid.SSID_len = MIN(sizeof(g_specific_ssid.SSID), \
+										req->essid_len);
+				memcpy(g_specific_ssid.SSID, req->essid, g_specific_ssid.SSID_len);
+				g_specific_ssid.SSID_len = htod32(g_specific_ssid.SSID_len);
+				g_scan_specified_ssid = 1;
+				WL_TRACE(("### Specific scan ssid=%s len=%d\n", \
+						g_specific_ssid.SSID, g_specific_ssid.SSID_len));
+			}
+		}
+	}
+#endif 
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)))) {
+		WL_SCAN(("Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error));
+		g_scan_specified_ssid = 0;
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+#ifdef WL_IW_USE_ISCAN
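+/* Prepare and start a broadcast incremental scan, then arm the result-poll timer. */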
+int
+wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+
+#if defined(CONFIG_FIRST_SCAN)
+	if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_IDLE) {
+		g_first_broadcast_scan = BROADCAST_SCAN_FIRST_STARTED;
+		WL_SCAN(("%s: First broadcast scan was forced\n", __FUNCTION__));
+	}
+	else if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) {
+		WL_SCAN(("%s: ignore ISCAN request first BS is not done yet\n", __FUNCTION__));
+		return 0;
+	}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (flag)
+		rtnl_lock();
+#endif
+
+	dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &iscan->scan_flag, sizeof(iscan->scan_flag));
+	wl_iw_set_event_mask(dev);
+
+	WL_SCAN(("+++: Set Broadcast ISCAN\n"));
+	
+	memset(&ssid, 0, sizeof(ssid));
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+
+	memset(&iscan->iscan_ex_params_p->params, 0, iscan->iscan_ex_param_size);
+	wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, &ssid);
+	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (flag)
+		rtnl_unlock();
+#endif
+
+	mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+	iscan->timer_on = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+	int ret = 0;
+
+	WL_SCAN(("%s: SIOCSIWSCAN : ISCAN\n", dev->name));
+
+#if defined(CSCAN)
+	WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+	return -EINVAL;
+#endif
+
+	net_os_wake_lock(dev);
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_SCAN(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		goto set_scan_end;
+	}
+#endif
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_SCAN(("%s: driver is not up yet after START\n", __FUNCTION__));
+		goto set_scan_end;
+	}
+
+#ifdef PNO_SUPPORT
+	if  (dhd_dev_get_pno_status(dev)) {
+		WL_SCAN(("%s: Scan called when PNO is active\n", __FUNCTION__));
+	}
+#endif
+
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		WL_ERROR(("%s error\n", __FUNCTION__));
+		goto set_scan_end;
+	}
+
+	if (g_scan_specified_ssid) {
+		WL_SCAN(("%s Specific SCAN already running ignoring BC scan\n", \
+				__FUNCTION__));
+		ret = -EBUSY;
+		goto set_scan_end;
+	}
+
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			int as = 0;
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+			dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+			wl_iw_set_event_mask(dev);
+			ret = wl_iw_set_scan(dev, info, wrqu, extra);
+			goto set_scan_end;
+		}
+		else {
+			g_scan_specified_ssid = 0;
+
+			if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+				WL_SCAN(("%s ISCAN already in progress \n", __FUNCTION__));
+				goto set_scan_end;
+			}
+		}
+	}
+#endif 
+
+#if defined(CONFIG_FIRST_SCAN) && !defined(CSCAN)
+	if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+		if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+
+			WL_ERROR(("%s Clean up First scan flag which is %d\n", \
+				 __FUNCTION__, g_first_broadcast_scan));
+			g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+		}
+		else {
+			WL_ERROR(("%s Ignoring Broadcast Scan:First Scan is not done yet %d\n", \
+					__FUNCTION__, g_first_counter_scans));
+			ret = -EBUSY;
+			goto set_scan_end;
+		}
+	}
+#endif
+
+	wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+
+set_scan_end:
+	net_os_wake_unlock(dev);
+	return ret;
+}
+#endif 
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+	uint8 *ie = *wpaie;
+
+	if ((ie[1] >= 6) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+		return TRUE;
+	}
+
+	ie += ie[1] + 2;
+	
+	*tlvs_len -= (int)(ie - *tlvs);
+	
+	*tlvs = ie;
+	return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+	uint8 *ie = *wpsie;
+
+	if ((ie[1] >= 4) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+		return TRUE;
+	}
+
+	ie += ie[1] + 2;
+	
+	*tlvs_len -= (int)(ie - *tlvs);
+	
+	*tlvs = ie;
+	return FALSE;
+}
+#endif 
+
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+	size_t len, int uppercase)
+{
+	size_t i;
+	char *pos = buf, *end = buf + buf_size;
+	int ret;
+	if (buf_size == 0)
+		return 0;
+	for (i = 0; i < len; i++) {
+		ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+			data[i]);
+		if (ret < 0 || ret >= end - pos) {
+			end[-1] = '\0';
+			return pos - buf;
+		}
+		pos += ret;
+	}
+	end[-1] = '\0';
+	return pos - buf;
+}
+
+
+int wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+	return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
+
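+/*
+ * Copy the RSN, WPS, WPA and WAPI information elements of a BSS into the
+ * WE event stream (WAPI as a custom "wapi_ie=" event unless
+ * WAPI_IE_USE_GENIE is defined).
+ */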
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+	struct iw_event	iwe;
+	char *event;
+	char *buf;
+	int custom_event_len;
+
+	event = *event_p;
+	if (bi->ie_length) {
+		
+		bcm_tlv_t *ie;
+		uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		int ptr_len = bi->ie_length;
+
+#ifdef BCMWPA2
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+#endif 
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			
+			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+			WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
+#ifdef WAPI_IE_USE_GENIE
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else 
+			iwe.cmd = IWEVCUSTOM;
+			custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+			iwe.u.data.length = custom_event_len;
+
+			buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+			if (buf == NULL)
+			{
+				WL_ERROR(("kmalloc(%d) returned NULL...\n", custom_event_len));
+				break;
+			}
+
+			memcpy(buf, "wapi_ie=", 8);
+			wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+			wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+			wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+			kfree(buf);
+#endif 
+			break;
+		}
+		*event_p = event;
+	}
+#endif 
+
+	return 0;
+}
+
+#ifndef CSCAN
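+/*
+ * Translate a wl_scan_results_t list into WE scan events (AP address, SSID,
+ * mode, frequency, quality, IEs, encryption flag, bit rates) and return the
+ * number of bytes written into 'extra'.
+ */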
+static uint
+wl_iw_get_scan_prep(
+	wl_scan_results_t *list,
+	struct iw_request_info *info,
+	char *extra,
+	short max_size)
+{
+	int  i, j;
+	struct iw_event  iwe;
+	wl_bss_info_t *bi = NULL;
+	char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
+	int	ret = 0;
+	int channel;
+
+	if (!list) {
+		WL_ERROR(("%s: Null list pointer",__FUNCTION__));
+		return ret;
+	}
+
+	for (i = 0; i < list->count && i < IW_MAX_AP; i++)
+	{
+		if (list->version != WL_BSS_INFO_VERSION) {
+			WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", \
+				__FUNCTION__, list->version));
+			return ret;
+		}
+
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+		WL_TRACE(("%s : %s\n", __FUNCTION__, bi->SSID));
+
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+		
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		iwe.cmd = SIOCGIWFREQ;
+		channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+		iwe.u.freq.m = wf_channel2mhz(channel,
+			channel <= CH_MAX_2G_CHANNEL ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		if (bi->rateset.count) {
+			if ((event + IW_EV_LCP_LEN) <= end) {
+				value = event + IW_EV_LCP_LEN;
+				iwe.cmd = SIOCGIWRATE;
+
+				iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+				for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+					iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+					value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+						IW_EV_PARAM_LEN);
+				}
+				event = value;
+			}
+		}
+	} 
+
+	if ((ret = (event - extra)) < 0) {
+		WL_ERROR(("==> Wrong size\n"));
+		ret = 0;
+	}
+	WL_TRACE(("%s: size=%d bytes prepared \n", __FUNCTION__, (unsigned int)(event - extra)));
+	return (uint)ret;
+}
+
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	wl_scan_results_t *list_merge;
+	wl_scan_results_t *list = (wl_scan_results_t *) g_scan;
+	int error;
+	uint buflen_from_user = dwrq->length;
+	uint len =  G_SCAN_RESULTS;
+	__u16 len_ret = 0;
+#if !defined(CSCAN)
+	__u16 merged_len = 0;
+#endif
+#if defined(WL_IW_USE_ISCAN)
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * p_buf;
+#if  !defined(CSCAN)
+	uint32 counter = 0;
+#endif
+#endif
+	WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user));
+
+	if (!extra) {
+		WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name));
+		return -EINVAL;
+	}
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+	ci.scan_channel = dtoh32(ci.scan_channel);
+	if (ci.scan_channel)
+		return -EAGAIN;
+
+#if !defined(CSCAN)
+	if (g_ss_cache_ctrl.m_timer_expired) {
+		wl_iw_free_ss_cache();
+		g_ss_cache_ctrl.m_timer_expired ^= 1;
+	}
+	if ((!g_scan_specified_ssid && g_ss_cache_ctrl.m_prev_scan_mode) ||
+		g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+		g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+		
+		wl_iw_reset_ss_cache();
+	}
+	g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+	if (g_scan_specified_ssid) {
+		g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+	}
+	else {
+		g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+	}
+#endif
+
+	if (g_scan_specified_ssid) {
+		
+		list = kmalloc(len, GFP_KERNEL);
+		if (!list) {
+			WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n", dev->name));
+			g_scan_specified_ssid = 0;
+			return -ENOMEM;
+		}
+	}
+
+	memset(list, 0, len);
+	list->buflen = htod32(len);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len))) {
+		WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name, __FUNCTION__, error));
+		dwrq->length = len;
+		if (g_scan_specified_ssid) {
+			g_scan_specified_ssid = 0;
+			kfree(list);
+		}
+		return 0;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+				__FUNCTION__, list->version));
+		if (g_scan_specified_ssid) {
+			g_scan_specified_ssid = 0;
+			kfree(list);
+		}
+		return -EINVAL;
+	}
+
+#if !defined(CSCAN)
+	if (g_scan_specified_ssid) {
+		
+		wl_iw_add_bss_to_ss_cache(list);
+		kfree(list);
+	}
+
+	mutex_lock(&wl_cache_lock);
+#if defined(WL_IW_USE_ISCAN)
+	if (g_scan_specified_ssid)
+		WL_TRACE(("%s: Specified scan APs from scan=%d\n", __FUNCTION__, list->count));
+	p_buf = iscan->list_hdr;
+	
+	while (p_buf != iscan->list_cur) {
+		list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+		WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+		counter += list_merge->count;
+		if (list_merge->count > 0)
+			len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+				extra+len_ret, buflen_from_user -len_ret);
+		p_buf = p_buf->next;
+	}
+	WL_TRACE(("%s merged with total Bcast APs=%d\n", __FUNCTION__, counter));
+#else
+	list_merge = (wl_scan_results_t *) g_scan;
+	len_ret = (__u16) wl_iw_get_scan_prep(list_merge, info, extra, buflen_from_user);
+#endif
+	mutex_unlock(&wl_cache_lock);
+	if (g_ss_cache_ctrl.m_link_down) {
+		wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+	}
+
+	wl_iw_merge_scan_cache(info, extra+len_ret, buflen_from_user-len_ret, &merged_len);
+	len_ret += merged_len;
+	wl_iw_run_ss_cache_timer(0);
+	wl_iw_run_ss_cache_timer(1);
+#else
+
+	if (g_scan_specified_ssid) {
+		WL_TRACE(("%s: Specified scan APs in the list =%d\n", __FUNCTION__, list->count));
+		len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+		kfree(list);
+
+#if defined(WL_IW_USE_ISCAN)
+		p_buf = iscan->list_hdr;
+		
+		while (p_buf != iscan->list_cur) {
+			list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+			WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+			if (list_merge->count > 0)
+				len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+				    extra+len_ret, buflen_from_user -len_ret);
+			p_buf = p_buf->next;
+		}
+#else
+		list_merge = (wl_scan_results_t *) g_scan;
+		WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+		if (list_merge->count > 0)
+			len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, extra+len_ret,
+				buflen_from_user -len_ret);
+#endif
+	}
+	else {
+		list = (wl_scan_results_t *) g_scan;
+		len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+	}
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
+	
+	g_scan_specified_ssid = 0;
+#endif 
+	
+	if ((len_ret + WE_ADD_EVENT_FIX) < buflen_from_user)
+		len = len_ret;
+
+	dwrq->length = len;
+	dwrq->flags = 0;	
+
+	WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, list->count));
+	return 0;
+}
+#endif
+
+#if defined(WL_IW_USE_ISCAN)
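+/*
+ * SIOCGIWSCAN handler for the iscan path: walks the chained iscan result
+ * buffers and emits WE scan events, optionally merging cached specific-scan
+ * entries afterwards.
+ */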
+static int
+wl_iw_iscan_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int ii, j;
+	int apcnt;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * p_buf;
+	uint32  counter = 0;
+	uint8   channel;
+#if !defined(CSCAN)
+	__u16 merged_len = 0;
+	uint buflen_from_user = dwrq->length;
+#endif
+
+	WL_SCAN(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length));
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		return -EINVAL;
+	}
+#endif
+
+	if (!extra) {
+		WL_TRACE(("%s: INVALID SIOCGIWSCAN GET bad parameter\n", dev->name));
+		return -EINVAL;
+	}
+
+#if defined(CONFIG_FIRST_SCAN)
+	if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_READY) {
+		WL_TRACE(("%s %s: first ISCAN results are NOT ready yet \n", \
+			 dev->name, __FUNCTION__));
+		return -EAGAIN;
+	}
+#endif
+
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		WL_ERROR(("%s: iscan not initialized or sysioc_pid < 0\n", __FUNCTION__));
+		return -EAGAIN;
+	}
+
+#if !defined(CSCAN)
+	if (g_ss_cache_ctrl.m_timer_expired) {
+		wl_iw_free_ss_cache();
+		g_ss_cache_ctrl.m_timer_expired ^= 1;
+	}
+	if (g_scan_specified_ssid) {
+		return wl_iw_get_scan(dev, info, dwrq, extra);
+	}
+	else {
+		if (g_ss_cache_ctrl.m_link_down) {
+			wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+		}
+		if (g_ss_cache_ctrl.m_prev_scan_mode || g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+			g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+
+			wl_iw_reset_ss_cache();
+		}
+		g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+		g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+	}
+#endif
+
+	WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name));
+	apcnt = 0;
+	p_buf = iscan->list_hdr;
+	
+	while (p_buf != iscan->list_cur) {
+	    list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+	    counter += list->count;
+
+	    if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+			 __FUNCTION__, list->version));
+		return -EINVAL;
+	    }
+
+	    bi = NULL;
+	    for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+		if ((dtoh32(bi->length) > WLC_IW_ISCAN_MAXLEN) ||
+		    (((uintptr)bi + dtoh32(bi->length)) > ((uintptr)list + WLC_IW_ISCAN_MAXLEN))) {
+			WL_ERROR(("%s: Scan results out of bounds: %u\n",__FUNCTION__,dtoh32(bi->length)));
+			return -E2BIG;
+		}
+
+		if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+			IW_EV_QUAL_LEN >= end)
+			return -E2BIG;
+
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		iwe.cmd = SIOCGIWFREQ;
+		channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+		iwe.u.freq.m = wf_channel2mhz(channel,
+			channel <= CH_MAX_2G_CHANNEL ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		if (bi->rateset.count) {
+			if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+				return -E2BIG;
+
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	    }
+	    p_buf = p_buf->next;
+	} 
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	
+
+#if !defined(CSCAN)
+	wl_iw_merge_scan_cache(info, event, buflen_from_user - dwrq->length, &merged_len);
+	dwrq->length += merged_len;
+	wl_iw_run_ss_cache_timer(0);
+	wl_iw_run_ss_cache_timer(1);
+#endif /* CSCAN */
+#if defined(CONFIG_FIRST_SCAN)
+	g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+#endif
+
+	WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, counter));
+
+	return 0;
+}
+#endif 
+
+static int
+wl_iw_set_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	int error;
+	wl_join_params_t join_params;
+	int join_params_size;
+
+	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+	
+	memset(&g_ssid, 0, sizeof(g_ssid));
+
+	CHECK_EXTRA_FOR_NULL(extra);
+
+	if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+		g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length);
+#else
+		g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length-1);
+#endif
+		memcpy(g_ssid.SSID, extra, g_ssid.SSID_len);
+	} else {
+		
+		g_ssid.SSID_len = 0;
+	}
+	g_ssid.SSID_len = htod32(g_ssid.SSID_len);
+
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	memcpy(&join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
+	memcpy(&join_params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+
+	wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) {
+		WL_ERROR(("%s: WLC_SET_SSID failed, error=%d\n", __FUNCTION__, error));
+		return error;
+	}
+
+	if (g_ssid.SSID_len) {
+		WL_TRACE(("%s: join SSID=%s ch=%d\n", __FUNCTION__, \
+			g_ssid.SSID,  g_wl_iw_params.target_channel));
+	}
+	return 0;
+}
+
+static int
+wl_iw_get_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+		WL_ERROR(("Error getting the SSID\n"));
+		return error;
+	}
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+	dwrq->length = ssid.SSID_len;
+
+	dwrq->flags = 1; 
+
+	return 0;
+}
+
+static int
+wl_iw_set_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+	WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if (dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+	memcpy(iw->nickname, extra, dwrq->length);
+	if (dwrq->length)
+		iw->nickname[dwrq->length - 1] = '\0';
+
+	return 0;
+}
+
+static int
+wl_iw_get_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+	WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	strcpy(extra, iw->nickname);
+	dwrq->length = strlen(extra) + 1;
+
+	return 0;
+}
+
+static int wl_iw_set_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	wl_rateset_t rateset;
+	int error, rate, i, error_bg, error_a;
+
+	WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+
+	rateset.count = dtoh32(rateset.count);
+
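+	/* A negative value selects the highest rate in the current rateset,
+	 * a value below the rateset count is treated as an index into it,
+	 * anything else is a bitrate in bit/s converted to 500 kbit/s units.
+	 */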
+	if (vwrq->value < 0) {
+		
+		rate = rateset.rates[rateset.count - 1] & 0x7f;
+	} else if (vwrq->value < rateset.count) {
+		
+		rate = rateset.rates[vwrq->value] & 0x7f;
+	} else {
+		
+		rate = vwrq->value / 500000;
+	}
+
+	if (vwrq->fixed) {
+		
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+	} else {
+		
+		
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+		
+		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+
+		
+		for (i = 0; i < rateset.count; i++)
+			if ((rateset.rates[i] & 0x7f) > rate)
+				break;
+		rateset.count = htod32(i);
+
+		
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+			return error;
+	}
+
+	return 0;
+}
+
+static int wl_iw_get_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rate;
+
+	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+		return error;
+	rate = dtoh32(rate);
+	vwrq->value = rate * 500000;
+
+	return 0;
+}
+
+static int
+wl_iw_set_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+	if (vwrq->disabled)
+		rts = DOT11_DEFAULT_RTS_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+		return -EINVAL;
+	else
+		rts = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+		return error;
+
+	vwrq->value = rts;
+	vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, frag;
+
+	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+	if (vwrq->disabled)
+		frag = DOT11_DEFAULT_FRAG_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+		return -EINVAL;
+	else
+		frag = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, fragthreshold;
+
+	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+		return error;
+
+	vwrq->value = fragthreshold;
+	vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable;
+	uint16 txpwrmw;
+	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+	
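+	/* WLC_SET_RADIO argument: the upper 16 bits select the radio-disable
+	 * bit being changed, the lower 16 bits carry its new value (only
+	 * WL_RADIO_SW_DISABLE is touched here).
+	 */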
+	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+	disable += WL_RADIO_SW_DISABLE << 16;
+
+	disable = htod32(disable);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+		return error;
+
+	
+	if (disable & WL_RADIO_SW_DISABLE)
+		return 0;
+
+	
+	if (!(vwrq->flags & IW_TXPOW_MWATT))
+		return -EINVAL;
+
+	
+	if (vwrq->value < 0)
+		return 0;
+
+	if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+	else txpwrmw = (uint16)vwrq->value;
+
+
+	error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+	return error;
+}
+
+static int
+wl_iw_get_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable, txpwrdbm;
+	uint8 result;
+
+	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+	    (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+		return error;
+
+	disable = dtoh32(disable);
+	result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+	vwrq->value = (int32)bcm_qdbm_to_mw(result);
+	vwrq->fixed = 0;
+	vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+	vwrq->flags = IW_TXPOW_MWATT;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+	
+	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+		return -EINVAL;
+
+	
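+	/* IW_RETRY_MAX/IW_RETRY_LONG update the long retry limit (WLC_SET_LRL),
+	 * IW_RETRY_MIN/IW_RETRY_SHORT the short retry limit (WLC_SET_SRL);
+	 * with no qualifier both limits are updated.
+	 */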
+	if (vwrq->flags & IW_RETRY_LIMIT) {
+
+		
+#if WIRELESS_EXT > 20
+	if ((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX) ||
+		!((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+	if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif 
+			lrl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+				return error;
+		}
+
+		
+#if WIRELESS_EXT > 20
+	if ((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN) ||
+		!((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif 
+			srl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+				return error;
+		}
+	}
+	return 0;
+}
+
+static int
+wl_iw_get_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+	vwrq->disabled = 0;      
+
+	
+	if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+		return error;
+
+	lrl = dtoh32(lrl);
+	srl = dtoh32(srl);
+
+	
+	if (vwrq->flags & IW_RETRY_MAX) {
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+		vwrq->value = lrl;
+	} else {
+		vwrq->flags = IW_RETRY_LIMIT;
+		vwrq->value = srl;
+		if (srl != lrl)
+			vwrq->flags |= IW_RETRY_MIN;
+	}
+
+	return 0;
+}
+#endif 
+
+static int
+wl_iw_set_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec;
+
+	WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+
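+	/* If no key index was supplied, find the current primary (default TX)
+	 * key by probing each of the default key slots.
+	 */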
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = htod32(key.index);
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+		
+		if (key.index == DOT11_MAX_DEFAULT_KEYS)
+			key.index = 0;
+	} else {
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+		if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+			return -EINVAL;
+	}
+
+	
+	if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+		
+		val = htod32(key.index);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+			return error;
+	} else {
+		key.len = dwrq->length;
+
+		if (dwrq->length > sizeof(key.data))
+			return -EINVAL;
+
+		memcpy(key.data, extra, dwrq->length);
+
+		key.flags = WL_PRIMARY_KEY;
+		switch (key.len) {
+		case WEP1_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP1;
+			break;
+		case WEP128_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP128;
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+		case TKIP_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_TKIP;
+			break;
+#endif
+		case AES_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		
+		swap_key_from_BE(&key);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+			return error;
+	}
+
+	
+	val = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+	if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+		return error;
+
+	wsec  &= ~(WEP_ENABLED);
+	wsec |= val;
+
+	if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+		return error;
+
+	
+	val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+	val = htod32(val);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec, auth;
+
+	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+	
+	bzero(&key, sizeof(wl_wsec_key_t));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = key.index;
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+	} else
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+		key.index = 0;
+
+	
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+		return error;
+
+	swap_key_to_BE(&key);
+
+	wsec = dtoh32(wsec);
+	auth = dtoh32(auth);
+	
+	dwrq->length = MIN(DOT11_MAX_KEY_SIZE, key.len);
+
+	
+	dwrq->flags = key.index + 1;
+	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+		
+		dwrq->flags |= IW_ENCODE_DISABLED;
+	}
+	if (auth) {
+		
+		dwrq->flags |= IW_ENCODE_RESTRICTED;
+	}
+
+	
+	if (dwrq->length && extra)
+		memcpy(extra, key.data, dwrq->length);
+
+	return 0;
+}
+
+static int
+wl_iw_set_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+	pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+	pm = htod32(pm);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+		return error;
+
+	pm = dtoh32(pm);
+	vwrq->disabled = pm ? 0 : 1;
+	vwrq->flags = IW_POWER_ALL_R;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+	uchar buf[WLC_IOCTL_SMLEN] = {0};
+	uchar *p = buf;
+	int wapi_ie_size;
+
+	WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+	CHECK_EXTRA_FOR_NULL(extra);
+
+	if (extra[0] == DOT11_MNG_WAPI_ID)
+	{
+		wapi_ie_size = iwp->length;
+		memcpy(p, extra, iwp->length);
+		dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+	}
+	else
+		dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+	return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+	iwp->length = 64;
+	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+	return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error;
+	struct iw_encode_ext *iwe;
+
+	WL_WSEC(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+	CHECK_EXTRA_FOR_NULL(extra);
+
+	memset(&key, 0, sizeof(key));
+	iwe = (struct iw_encode_ext *)extra;
+
+	
+	if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+	}
+
+	
+	key.index = 0;
+	if (dwrq->flags & IW_ENCODE_INDEX)
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	key.len = iwe->key_len;
+
+	
+	if (!ETHER_ISMULTI(iwe->addr.sa_data))
+		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+	
+	if (key.len == 0) {
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the primary key to %d\n", key.index));
+			
+			key.index = htod32(key.index);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+				&key.index, sizeof(key.index));
+			if (error)
+				return error;
+		}
+		
+		else {
+			swap_key_from_BE(&key);
+			dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		}
+	}
+	else {
+		if (iwe->key_len > sizeof(key.data))
+			return -EINVAL;
+
+		WL_WSEC(("Setting the key index %d\n", key.index));
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("key is a Primary Key\n"));
+			key.flags = WL_PRIMARY_KEY;
+		}
+
+		bcopy((void *)iwe->key, key.data, iwe->key_len);
+
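+		/* For TKIP, swap the two 8-byte Michael MIC halves (bytes 16-23
+		 * and 24-31) of the supplied key before handing it to the firmware.
+		 */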
+		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+			uint8 keybuf[8];
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+
+		
+		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+			uchar *ivptr;
+			ivptr = (uchar *)iwe->rx_seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = TRUE;
+		}
+
+		switch (iwe->alg) {
+			case IW_ENCODE_ALG_NONE:
+				key.algo = CRYPTO_ALGO_OFF;
+				break;
+			case IW_ENCODE_ALG_WEP:
+				if (iwe->key_len == WEP1_KEY_SIZE)
+					key.algo = CRYPTO_ALGO_WEP1;
+				else
+					key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			case IW_ENCODE_ALG_TKIP:
+				key.algo = CRYPTO_ALGO_TKIP;
+				break;
+			case IW_ENCODE_ALG_CCMP:
+				key.algo = CRYPTO_ALGO_AES_CCM;
+				break;
+			case IW_ENCODE_ALG_SM4:
+				key.algo = CRYPTO_ALGO_SMS4;
+				if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+					key.flags &= ~WL_PRIMARY_KEY;
+				}
+				break;
+			default:
+				break;
+		}
+		swap_key_from_BE(&key);
+
+		dhd_wait_pend8021x(dev);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		if (error)
+			return error;
+	}
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+#ifdef BCMWPA2
+struct {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+
+static int
+wl_iw_set_pmksa(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	struct iw_pmksa *iwpmksa;
+	uint i;
+	int ret = 0;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name));
+	CHECK_EXTRA_FOR_NULL(extra);
+
+	iwpmksa = (struct iw_pmksa *)extra;
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+
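+	/* Maintain a local PMKID cache: FLUSH clears it, REMOVE deletes the
+	 * entry matching the BSSID and compacts the list, ADD replaces an
+	 * existing entry for the BSSID or appends a new one. The whole cache
+	 * is then pushed to the firmware via the "pmkid_info" iovar.
+	 */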
+	if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+		WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+		bzero((char *)&pmkid_list, sizeof(pmkid_list));
+	}
+
+	else if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+		{
+			pmkid_list_t pmkid, *pmkidptr;
+			uint j;
+			pmkidptr = &pmkid;
+
+			bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+			bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+
+			WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+			WL_WSEC(("\n"));
+		}
+
+		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+
+		if ((pmkid_list.pmkids.npmkid > 0) && (i < pmkid_list.pmkids.npmkid)) {
+			bzero(&pmkid_list.pmkids.pmkid[i], sizeof(pmkid_t));
+			for (; i < (pmkid_list.pmkids.npmkid - 1); i++) {
+				bcopy(&pmkid_list.pmkids.pmkid[i+1].BSSID,
+					&pmkid_list.pmkids.pmkid[i].BSSID,
+					ETHER_ADDR_LEN);
+				bcopy(&pmkid_list.pmkids.pmkid[i+1].PMKID,
+					&pmkid_list.pmkids.pmkid[i].PMKID,
+					WPA2_PMKID_LEN);
+			}
+			pmkid_list.pmkids.npmkid--;
+		}
+		else
+			ret = -EINVAL;
+	}
+
+	else if (iwpmksa->cmd == IW_PMKSA_ADD) {
+		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+		if (i < MAXPMKID) {
+			bcopy(&iwpmksa->bssid.sa_data[0],
+				&pmkid_list.pmkids.pmkid[i].BSSID,
+				ETHER_ADDR_LEN);
+			bcopy(&iwpmksa->pmkid[0], &pmkid_list.pmkids.pmkid[i].PMKID,
+				WPA2_PMKID_LEN);
+			if (i == pmkid_list.pmkids.npmkid)
+				pmkid_list.pmkids.npmkid++;
+		}
+		else
+			ret = -EINVAL;
+
+		{
+			uint j;
+			uint k;
+			k = pmkid_list.pmkids.npmkid;
+			WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[k].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[k].PMKID[j]));
+			WL_WSEC(("\n"));
+		}
+	}
+	WL_WSEC(("PRINTING pmkid LIST - No of elements %d, ret = %d\n", pmkid_list.pmkids.npmkid, ret));
+	for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+		uint j;
+		WL_WSEC(("PMKID[%d]: %s = ", i,
+			bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[i].BSSID,
+			eabuf)));
+		for (j = 0; j < WPA2_PMKID_LEN; j++)
+			WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]));
+		WL_WSEC(("\n"));
+	}
+	WL_WSEC(("\n"));
+
+	if (!ret)
+		ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+	return ret;
+}
+#endif 
+#endif 
+
+static int
+wl_iw_get_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	WL_WSEC(("%s: SIOCGIWENCODEEXT\n", dev->name));
+	return 0;
+}
+
+static int
+wl_iw_set_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error = 0;
+	int paramid;
+	int paramval;
+	int val = 0;
+	wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+	WL_WSEC(("%s: SIOCSIWAUTH\n", dev->name));
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		return 0;
+	}
+#endif
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+	paramval = vwrq->value;
+
+	WL_WSEC(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+		dev->name, paramid, paramval));
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		
+		if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+			val = WPA_AUTH_DISABLED;
+		else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+			val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+#ifdef BCMWPA2
+		else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+			val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#endif 
+		else if (paramval & IW_AUTH_WAPI_VERSION_1)
+			val = WPA_AUTH_WAPI;
+		WL_WSEC(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+		
+		
+		if (paramval & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+			val = WEP_ENABLED;
+		if (paramval & IW_AUTH_CIPHER_TKIP)
+			val = TKIP_ENABLED;
+		if (paramval & IW_AUTH_CIPHER_CCMP)
+			val = AES_ENABLED;
+		if (paramval & IW_AUTH_CIPHER_SMS4)
+			val = SMS4_ENABLED;
+
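+		/* Pairwise and group ciphers are tracked separately; the union of
+		 * the two is what gets programmed into the "wsec" iovar.
+		 */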
+		if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+			iw->pwsec = val;
+			val |= iw->gwsec;
+		}
+		else {
+			iw->gwsec = val;
+			val |= iw->pwsec;
+		}
+
+		if (iw->privacy_invoked && !val) {
+			WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+				"we're a WPS enrollee\n", dev->name, __FUNCTION__));
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+				WL_ERROR(("Failed to set iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else if (val) {
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_ERROR(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		}
+
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+			WL_ERROR(("Failed to set 'wsec' iovar\n"));
+			return error;
+		}
+
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) {
+			WL_ERROR(("Failed to get 'wpa_auth' iovar\n"));
+			return error;
+		}
+
+		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+			if (paramval & IW_AUTH_KEY_MGMT_PSK)
+				val = WPA_AUTH_PSK;
+			else
+				val = WPA_AUTH_UNSPECIFIED;
+		}
+#ifdef BCMWPA2
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+			if (paramval & IW_AUTH_KEY_MGMT_PSK)
+				val = WPA2_AUTH_PSK;
+			else
+				val = WPA2_AUTH_UNSPECIFIED;
+		}
+#endif 
+		if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+			val = WPA_AUTH_WAPI;
+		WL_WSEC(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) {
+			WL_ERROR(("Failed to set 'wpa_auth' iovar\n"));
+			return error;
+		}
+
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		if ((error = dev_wlc_bufvar_set(dev, "tkip_countermeasures", \
+						(char *)&paramval, sizeof(paramval))))
+			WL_WSEC(("%s: tkip_countermeasures failed %d\n", __FUNCTION__, error));
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		
+		WL_WSEC(("Setting the D11auth %d\n", paramval));
+		if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
+			val = 0;
+		else if (paramval == IW_AUTH_ALG_SHARED_KEY)
+			val = 1;
+		else if (paramval == (IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY))
+			val = 2;
+		else
+			error = 1;
+		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (paramval == 0) {
+			iw->pwsec = 0;
+			iw->gwsec = 0;
+			if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) {
+				WL_ERROR(("Failed to get 'wsec' iovar\n"));
+				return error;
+			}
+			if (val & (TKIP_ENABLED | AES_ENABLED)) {
+				val &= ~(TKIP_ENABLED | AES_ENABLED);
+				dev_wlc_intvar_set(dev, "wsec", val);
+			}
+			val = 0;
+
+			WL_INFORM(("%s: %d: setting wpa_auth to %d\n",
+				__FUNCTION__, __LINE__, val));
+			error = dev_wlc_intvar_set(dev, "wpa_auth", 0);
+			if (error)
+				WL_ERROR(("Failed to set 'wpa_auth' iovar\n"));
+			return error;
+		}
+
+		
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		error = dev_wlc_bufvar_set(dev, "wsec_restrict", \
+				   (char *)&paramval, sizeof(paramval));
+		if (error)
+			WL_ERROR(("%s: failed to set wsec_restrict, error %d\n", __FUNCTION__, error));
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		error = dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", \
+				   (char *)&paramval, sizeof(paramval));
+		if (error)
+			WL_WSEC(("%s: failed to set rx_unencrypted_eapol, error %d\n", __FUNCTION__, error));
+		break;
+
+#if WIRELESS_EXT > 17
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		
+		break;
+	case IW_AUTH_PRIVACY_INVOKED: {
+		int wsec;
+
+		if (paramval == 0) {
+			iw->privacy_invoked = FALSE;
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else {
+			iw->privacy_invoked = TRUE;
+			if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+				return error;
+
+			if (!(IW_WSEC_ENABLED(wsec))) {
+
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+					WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			} else {
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+					WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			}
+		}
+		break;
+	}
+#endif
+	case IW_AUTH_WAPI_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+			return error;
+		if (paramval) {
+			val |= SMS4_ENABLED;
+			if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+				WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
+					__FUNCTION__, val, error));
+				return error;
+			}
+			if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WPA_AUTH_WAPI))) {
+				WL_ERROR(("%s: setting wpa_auth(WPA_AUTH_WAPI) returned %d\n",
+					__FUNCTION__, error));
+				return error;
+			}
+		}
+
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+#ifdef BCMWPA2
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+#else
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK))
+#endif 
+
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = *(wl_iw_t **)netdev_priv(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+			paramval = IW_AUTH_WPA_VERSION_DISABLED;
+		else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA;
+#ifdef BCMWPA2
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA2;
+#endif 
+		break;
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+		if (paramid == IW_AUTH_CIPHER_PAIRWISE)
+			val = iw->pwsec;
+		else
+			val = iw->gwsec;
+
+		paramval = 0;
+		if (val) {
+			if (val & WEP_ENABLED)
+				paramval |= (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104);
+			if (val & TKIP_ENABLED)
+				paramval |= (IW_AUTH_CIPHER_TKIP);
+			if (val & AES_ENABLED)
+				paramval |= (IW_AUTH_CIPHER_CCMP);
+		}
+		else
+			paramval = IW_AUTH_CIPHER_NONE;
+		break;
+	case IW_AUTH_KEY_MGMT:
+		
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		error = dev_wlc_bufvar_get(dev, "tkip_countermeasures", \
+							(char *)&paramval, sizeof(paramval));
+		if (error)
+			WL_ERROR(("%s: failed to get tkip_countermeasures, error %d\n", __FUNCTION__, error));
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		error = dev_wlc_bufvar_get(dev, "wsec_restrict", \
+					   (char *)&paramval, sizeof(paramval));
+		if (error)
+			WL_ERROR(("%s: failed to get wsec_restrict, error %d\n", __FUNCTION__, error));
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		error = dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", \
+						   (char *)&paramval, sizeof(paramval));
+		if (error)
+			WL_ERROR(("%s: failed to get rx_unencrypted_eapol, error %d\n", __FUNCTION__, error));
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+#if WIRELESS_EXT > 17
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+#endif 
+	}
+	vwrq->value = paramval;
+	return 0;
+}
+#endif
+
+
+#ifdef SOFTAP
+
+static int ap_macmode = MACLIST_MODE_DISABLED;
+static struct mflist ap_black_list;
+static int
+wl_iw_parse_wep(char *keystr, wl_wsec_key_t *key)
+{
+	char hex[] = "XX";
+	unsigned char *data = key->data;
+
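+	/* Accept keys either as ASCII strings (5/13/16 chars) or as hex digits
+	 * (10/26/32/64 digits, optionally prefixed with "0x").
+	 */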
+	switch (strlen(keystr)) {
+	case 5:
+	case 13:
+	case 16:
+		key->len = strlen(keystr);
+		memcpy(data, keystr, key->len + 1);
+		break;
+	case 12:
+	case 28:
+	case 34:
+	case 66:
+		if (!strnicmp(keystr, "0x", 2))
+			keystr += 2;
+		else
+			return -1;
+	case 10:
+	case 26:
+	case 32:
+	case 64:
+		key->len = strlen(keystr) / 2;
+		while (*keystr) {
+			strncpy(hex, keystr, 2);
+			*data++ = (char) bcm_strtoul(hex, NULL, 16);
+			keystr += 2;
+		}
+		break;
+	default:
+		return -1;
+	}
+
+	switch (key->len) {
+	case 5:
+		key->algo = CRYPTO_ALGO_WEP1;
+		break;
+	case 13:
+		key->algo = CRYPTO_ALGO_WEP128;
+		break;
+	case 16:
+		key->algo = CRYPTO_ALGO_AES_CCM;
+		break;
+	case 32:
+		key->algo = CRYPTO_ALGO_TKIP;
+		break;
+	default:
+		return -1;
+	}
+
+	key->flags |= WL_PRIMARY_KEY;
+
+	return 0;
+}
+
+#ifdef EXT_WPA_CRYPTO
+#define SHA1HashSize 20
+extern void pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+				int iterations, u8 *buf, size_t buflen);
+
+#else
+
+#define SHA1HashSize 20
+int pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+				int iterations, u8 *buf, size_t buflen)
+{
+	WL_ERROR(("WARNING: %s is not implemented !!!\n", __FUNCTION__));
+	return -1;
+}
+
+#endif 
+
+
+int dev_iw_write_cfg1_bss_var(struct net_device *dev, int val)
+{
+	struct {
+		int cfg;
+		int val;
+	} bss_setbuf;
+
+	int bss_set_res;
+	char smbuf[WLC_IOCTL_SMLEN];
+	memset(smbuf, 0, sizeof(smbuf));
+
+	bss_setbuf.cfg = 1;
+	bss_setbuf.val = val;
+
+	bss_set_res = dev_iw_iovar_setbuf(dev, "bss",
+		&bss_setbuf, sizeof(bss_setbuf), smbuf, sizeof(smbuf));
+	WL_TRACE(("%s: bss_set_result:%d set with %d\n", __FUNCTION__, bss_set_res, val));
+
+	return bss_set_res;
+}
+
+
+int dev_iw_read_cfg1_bss_var(struct net_device *dev, int *val)
+{
+	int bsscfg_idx = 1;
+	int bss_set_res;
+	char smbuf[WLC_IOCTL_SMLEN];
+	memset(smbuf, 0, sizeof(smbuf));
+
+	bss_set_res = dev_iw_iovar_getbuf(dev, "bss", \
+		 &bsscfg_idx, sizeof(bsscfg_idx), smbuf, sizeof(smbuf));
+	*val = *(int*)smbuf;
+	*val = dtoh32(*val);
+	WL_TRACE(("%s: status=%d bss_get_result=%d\n", __FUNCTION__, bss_set_res, *val));
+	return bss_set_res;
+}
+
+
+#ifndef AP_ONLY
+static int wl_bssiovar_mkbuf(
+			const char *iovar,
+			int bssidx,
+			void *param,
+			int paramlen,
+			void *bufptr,
+			int buflen,
+			int *perr)
+{
+	const char *prefix = "bsscfg:";
+	int8 *p;
+	uint prefixlen;
+	uint namelen;
+	uint iolen;
+
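+	/* Per-BSS iovar buffer layout: "bsscfg:" prefix, the iovar name
+	 * (including its NUL), a 32-bit bsscfg index, then the parameter bytes.
+	 */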
+	prefixlen = strlen(prefix);
+	namelen = strlen(iovar) + 1;
+	iolen = prefixlen + namelen + sizeof(int) + paramlen;
+
+	if (buflen < 0 || iolen > (uint)buflen) {
+		*perr = BCME_BUFTOOSHORT;
+		return 0;
+	}
+
+	p = (int8 *)bufptr;
+
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	memcpy(p, iovar, namelen);
+	p += namelen;
+
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(int32));
+	p += sizeof(int32);
+
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	*perr = 0;
+	return iolen;
+}
+#endif 
+
+
+int get_user_params(char *user_params, struct iw_point *dwrq)
+{
+	int ret = 0;
+
+	if (copy_from_user(user_params, dwrq->pointer, dwrq->length)) {
+		WL_ERROR(("\n%s: no user params: uptr:%p, ulen:%d\n",
+			__FUNCTION__, dwrq->pointer, dwrq->length));
+		return -EFAULT;
+	}
+
+	WL_TRACE(("\n%s: iwpriv user params:%s\n", __FUNCTION__, user_params));
+
+	return ret;
+}
+
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+#if defined(CSCAN)
+
+static int
+wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, int nchan)
+{
+	int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
+	int err = 0;
+	char *p;
+	int i;
+	iscan_info_t *iscan = g_iscan;
+
+	WL_SCAN(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan));
+
+	if ((!dev) || (!iscan) || (!iscan->iscan_ex_params_p)) {
+		WL_ERROR(("%s error exit\n", __FUNCTION__));
+		err = -1;
+		goto exit;
+	}
+
+#ifdef PNO_SUPPORT
+	if  (dhd_dev_get_pno_status(dev)) {
+		WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__));
+	}
+#endif
+
+	params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+
+	if (nssid > 0) {
+		i = OFFSETOF(wl_scan_params_t, channel_list) + nchan * sizeof(uint16);
+		i = ROUNDUP(i, sizeof(uint32));
+		if (i + nssid * sizeof(wlc_ssid_t) > params_size) {
+			printf("additional ssids exceed params_size\n");
+			err = -1;
+			goto exit;
+		}
+
+		p = ((char*)&iscan->iscan_ex_params_p->params) + i;
+		memcpy(p, ssids_local, nssid * sizeof(wlc_ssid_t));
+		p += nssid * sizeof(wlc_ssid_t);
+	} else {
+		p = (char*)iscan->iscan_ex_params_p->params.channel_list + nchan * sizeof(uint16);
+	}
+
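+	/* channel_num packs the SSID count in the bits above
+	 * WL_SCAN_PARAMS_NSSID_SHIFT and the channel count in the low
+	 * WL_SCAN_PARAMS_COUNT_MASK bits.
+	 */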
+	iscan->iscan_ex_params_p->params.channel_num = \
+		htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) | \
+					(nchan & WL_SCAN_PARAMS_COUNT_MASK));
+
+	nssid = \
+	(uint)((iscan->iscan_ex_params_p->params.channel_num >> WL_SCAN_PARAMS_NSSID_SHIFT) & \
+		               WL_SCAN_PARAMS_COUNT_MASK);
+
+	params_size = (int) (p - (char*)iscan->iscan_ex_params_p + nssid * sizeof(wlc_ssid_t));
+	iscan->iscan_ex_param_size = params_size;
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+	wl_iw_set_event_mask(dev);
+	mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+	iscan->timer_on = 1;
+
+#ifdef SCAN_DUMP
+	{
+		int i;
+		WL_SCAN(("\n### List of SSIDs to scan ###\n"));
+		for (i = 0; i < nssid; i++) {
+			if (!ssids_local[i].SSID_len)
+				WL_SCAN(("%d: Broadcast scan\n", i));
+			else
+			WL_SCAN(("%d: scan  for  %s size =%d\n", i, \
+				ssids_local[i].SSID, ssids_local[i].SSID_len));
+		}
+		WL_SCAN(("### List of channels to scan ###\n"));
+		for (i = 0; i < nchan; i++)
+		{
+			WL_SCAN(("%d ", iscan->iscan_ex_params_p->params.channel_list[i]));
+		}
+		WL_SCAN(("\nnprobes=%d\n", iscan->iscan_ex_params_p->params.nprobes));
+		WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+		WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+		WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+		WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+		WL_SCAN(("\n###################\n"));
+	}
+#endif
+
+	if (params_size > WLC_IOCTL_MEDLEN) {
+			WL_ERROR(("%s: params_size=%d exceeds WLC_IOCTL_MEDLEN\n", \
+				__FUNCTION__, params_size));
+			err = -1;
+			goto exit;
+	}
+
+	if ((err = dev_iw_iovar_setbuf(dev, "iscan", iscan->iscan_ex_params_p, \
+			iscan->iscan_ex_param_size, \
+			iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+			WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+			err = -1;
+	}
+
+exit:
+
+	return err;
+}
+
+
+static int iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info, \
+				union iwreq_data *wrqu, char *ext)
+{
+	int res = 0;
+	char  *extra = NULL;
+	iscan_info_t *iscan = g_iscan;
+	wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+	int nssid = 0;
+	int nchan = 0;
+
+	WL_TRACE(("\n%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		return -1;
+	}
+
+#ifdef PNO_SET_DEBUG
+	wl_iw_set_pno_set(dev, info, wrqu, extra);
+	return 0;
+#endif
+
+	if (wrqu->data.length != 0) {
+
+		char *str_ptr;
+
+		if (!iscan->iscan_ex_params_p) {
+			return -EFAULT;
+		}
+
+		if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		extra[wrqu->data.length] = 0;
+		WL_ERROR(("Got str param in iw_point:\n %s\n", extra));
+
+		str_ptr = extra;
+
+		if (strncmp(str_ptr, GET_SSID, strlen(GET_SSID))) {
+			WL_ERROR(("%s: error extracting the SSID string\n", __FUNCTION__));
+			goto exit_proc;
+		}
+		str_ptr += strlen(GET_SSID);
+		nssid = wl_iw_parse_ssid_list(&str_ptr, ssids_local, nssid, \
+						WL_SCAN_PARAMS_SSID_MAX);
+		if (nssid == -1) {
+			WL_ERROR(("%s: invalid ssid list\n", __FUNCTION__));
+			res = -1;
+			goto exit_proc;
+		}
+
+		if (iscan->iscan_ex_param_size > WLC_IOCTL_MAXLEN) {
+			WL_ERROR(("%s: invalid ex_param_size %d\n", \
+				__FUNCTION__, iscan->iscan_ex_param_size));
+			res = -1;
+			goto exit_proc;
+		}
+		memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+
+		
+		wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+		iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+		iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+		iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+		
+		if ((nchan = wl_iw_parse_channel_list(&str_ptr, \
+					&iscan->iscan_ex_params_p->params.channel_list[0], \
+					WL_NUMCHANNELS)) == -1) {
+			WL_ERROR(("%s: missing channel list\n", __FUNCTION__));
+			res = -1;
+			goto exit_proc;
+		}
+
+		
+		get_parmeter_from_string(&str_ptr, \
+				GET_NPROBE, PTYPE_INTDEC, \
+				&iscan->iscan_ex_params_p->params.nprobes, 2);
+
+		get_parmeter_from_string(&str_ptr, GET_ACTIVE_ASSOC_DWELL, PTYPE_INTDEC, \
+						&iscan->iscan_ex_params_p->params.active_time, 4);
+
+		get_parmeter_from_string(&str_ptr, GET_PASSIVE_ASSOC_DWELL, PTYPE_INTDEC, \
+						&iscan->iscan_ex_params_p->params.passive_time, 4);
+
+		get_parmeter_from_string(&str_ptr, GET_HOME_DWELL, PTYPE_INTDEC, \
+					&iscan->iscan_ex_params_p->params.home_time, 4);
+
+		get_parmeter_from_string(&str_ptr, GET_SCAN_TYPE, PTYPE_INTDEC, \
+					&iscan->iscan_ex_params_p->params.scan_type, 1);
+
+		res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+	} else {
+		  WL_ERROR(("IWPRIV argument len = 0 \n"));
+		  return -1;
+	}
+
+exit_proc:
+
+	kfree(extra);
+
+	return res;
+}
+
+
+static int
+wl_iw_set_cscan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int res = -1;
+	iscan_info_t *iscan = g_iscan;
+	wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+	int nssid = 0;
+	int nchan = 0;
+	cscan_tlv_t *cscan_tlv_temp;
+	char type;
+	char *str_ptr;
+	int tlv_size_left;
+#ifdef TLV_DEBUG
+	int i;
+	char tlv_in_example[] = {			'C', 'S', 'C', 'A', 'N', ' ', \
+							0x53, 0x01, 0x00, 0x00,
+							'S',	  
+							0x00, 
+							'S',    
+							0x04, 
+							'B', 'R', 'C', 'M',
+							'C',
+							0x06, 
+							'P', 
+							0x94,
+							0x11,
+							'T',     
+							0x01  
+							};
+#endif 
+
+	WL_TRACE(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	net_os_wake_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+
+	if (wrqu->data.length < (strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))) {
+		WL_ERROR(("%s: argument length %d is less than the minimum %d\n", __FUNCTION__, \
+			wrqu->data.length, (int)(strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))));
+		goto exit_proc;
+	}
+
+#ifdef TLV_DEBUG
+	memcpy(extra, tlv_in_example, sizeof(tlv_in_example));
+	wrqu->data.length = sizeof(tlv_in_example);
+	for (i = 0; i < wrqu->data.length; i++)
+		printf("%02X ", extra[i]);
+	printf("\n");
+#endif 
+
+	str_ptr = extra;
+	str_ptr +=  strlen(CSCAN_COMMAND);
+	tlv_size_left = wrqu->data.length - strlen(CSCAN_COMMAND);
+
+	cscan_tlv_temp = (cscan_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+	
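+	/* The buffer after "CSCAN" is a TLV blob: a cscan_tlv_t header
+	 * (prefix/version/subversion) followed by the SSID list and optional
+	 * channel, nprobes, dwell-time and scan-type elements.
+	 */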
+	if ((cscan_tlv_temp->prefix == CSCAN_TLV_PREFIX) && \
+		(cscan_tlv_temp->version == CSCAN_TLV_VERSION) && \
+		(cscan_tlv_temp->subver == CSCAN_TLV_SUBVERSION))
+	{
+		str_ptr += sizeof(cscan_tlv_t);
+		tlv_size_left  -= sizeof(cscan_tlv_t);
+
+		
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, \
+				WL_SCAN_PARAMS_SSID_MAX, &tlv_size_left)) <= 0) {
+			WL_ERROR(("SSID is not present or is corrupted, ret=%d\n", nssid));
+			goto exit_proc;
+		}
+		else {
+			
+			memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+
+			
+			wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+			iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+			iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+			iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+			
+			while (tlv_size_left > 0)
+			{
+			type = str_ptr[0];
+			switch (type) {
+				case CSCAN_TLV_TYPE_CHANNEL_IE:
+					
+					if ((nchan = wl_iw_parse_channel_list_tlv(&str_ptr, \
+					&iscan->iscan_ex_params_p->params.channel_list[0], \
+					WL_NUMCHANNELS, &tlv_size_left)) == -1) {
+					WL_ERROR(("%s missing channel list\n", \
+						 __FUNCTION__));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_NPROBE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+						&iscan->iscan_ex_params_p->params.nprobes, \
+						sizeof(iscan->iscan_ex_params_p->params.nprobes), \
+						type, sizeof(char), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n", \
+							__FUNCTION__, res));
+							goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_ACTIVE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+					&iscan->iscan_ex_params_p->params.active_time, \
+					sizeof(iscan->iscan_ex_params_p->params.active_time), \
+					type, sizeof(short), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n", \
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_PASSIVE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+					&iscan->iscan_ex_params_p->params.passive_time, \
+					sizeof(iscan->iscan_ex_params_p->params.passive_time), \
+					type, sizeof(short), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n", \
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_HOME_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+					&iscan->iscan_ex_params_p->params.home_time, \
+					sizeof(iscan->iscan_ex_params_p->params.home_time), \
+					type, sizeof(short), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n", \
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_STYPE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr, \
+					&iscan->iscan_ex_params_p->params.scan_type, \
+					sizeof(iscan->iscan_ex_params_p->params.scan_type), \
+					type, sizeof(char), &tlv_size_left)) == -1) {
+					WL_ERROR(("%s return %d\n", \
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+
+				default :
+					WL_ERROR(("%s: got unknown type %X\n", \
+						__FUNCTION__, type));
+					goto exit_proc;
+				break;
+				}
+			} 
+			}
+		}
+		else {
+			WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+			goto exit_proc;
+		}
+
+#if defined(CONFIG_FIRST_SCAN)
+		if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+			if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+
+				WL_ERROR(("%s Clean up First scan flag which is %d\n", \
+						 __FUNCTION__, g_first_broadcast_scan));
+				g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+			}
+			else {
+				WL_ERROR(("%s Ignoring CSCAN : First Scan is not done yet %d\n", \
+						__FUNCTION__, g_first_counter_scans));
+				res = -EBUSY;
+				goto exit_proc;
+			}
+		}
+#endif
+
+		res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+exit_proc:
+	net_os_wake_unlock(dev);
+	return res;
+}
+
+#endif 
+
+#ifdef SOFTAP
+#ifndef AP_ONLY
+
+static int thr_wait_for_2nd_eth_dev(void *data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	wl_iw_t *iw;
+	int ret = 0;
+	unsigned long flags;
+
+	net_os_wake_lock(dev);
+
+	DAEMONIZE("wl0_eth_wthread");
+
+	WL_TRACE(("\n>%s: thread started, PID:%x\n", __FUNCTION__, current->pid));
+	iw = *(wl_iw_t **)netdev_priv(dev);
+	if (!iw) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		ret = -1;
+		goto fail;
+	}
+
+#ifndef BCMSDIOH_STD
+	if (down_timeout(&ap_eth_sema,  msecs_to_jiffies(5000)) != 0) {
+		WL_ERROR(("\n%s: ap_eth_sema timeout\n", __FUNCTION__));
+		ret = -1;
+		goto fail;
+	}
+#endif
+
+	flags = dhd_os_spin_lock(iw->pub);
+	if (!ap_net_dev) {
+		WL_ERROR(("%s: ap_net_dev is null\n", __FUNCTION__));
+		ret = -1;
+		dhd_os_spin_unlock(iw->pub, flags);
+		goto fail;
+	}
+
+	WL_TRACE(("\n>%s: Thread:'softap ethdev IF:%s is detected !!!'\n\n",
+		__FUNCTION__, ap_net_dev->name));
+
+	ap_cfg_running = TRUE;
+
+	dhd_os_spin_unlock(iw->pub, flags);
+
+	bcm_mdelay(500);
+
+	wl_iw_send_priv_event(priv_dev, "AP_SET_CFG_OK");
+
+fail:
+	WL_TRACE(("\n>%s, thread completed\n", __FUNCTION__));
+
+	net_os_wake_unlock(dev);
+
+	complete_and_exit(&ap_cfg_exited, 0);
+	return ret;
+}
+#endif 
+#ifndef AP_ONLY
+static int last_auto_channel = 6;
+#endif
+static int get_softap_auto_channel(struct net_device *dev, struct ap_profile *ap)
+{
+	int chosen = 0;
+	wl_uint32_list_t request;
+	int rescan = 0;
+	int retry = 0;
+	int updown = 0;
+	int ret = 0;
+	wlc_ssid_t null_ssid;
+	int res = 0;
+#ifndef AP_ONLY
+	int iolen = 0;
+	int mkvar_err = 0;
+	int bsscfg_index = 1;
+	char buf[WLC_IOCTL_SMLEN];
+#endif
+	WL_SOFTAP(("Enter %s\n", __FUNCTION__));
+
+#ifndef AP_ONLY
+	if (ap_cfg_running) {
+		ap->channel = last_auto_channel;
+		return res;
+	}
+#endif
+	memset(&null_ssid, 0, sizeof(wlc_ssid_t));
+	res |= dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown));
+#ifdef AP_ONLY
+	res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &null_ssid, sizeof(null_ssid));
+#else
+	iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&null_ssid), \
+		null_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+	ASSERT(iolen);
+	res |= dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen);
+#endif
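+	/* Kick off firmware auto channel selection with a null SSID, then poll
+	 * WLC_GET_CHANNEL_SEL every 500 ms (up to 3 retries); a result of 1
+	 * triggers one full rescan before the chosen channel is accepted.
+	 */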
+	auto_channel_retry:
+			request.count = htod32(0);
+			ret = dev_wlc_ioctl(dev, WLC_START_CHANNEL_SEL, &request, sizeof(request));
+			if (ret < 0) {
+				WL_ERROR(("can't start auto channel scan\n"));
+				goto fail;
+			}
+
+	get_channel_retry:
+			bcm_mdelay(500);
+
+			ret = dev_wlc_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
+			if (ret < 0 || dtoh32(chosen) == 0) {
+				if (retry++ < 3)
+					goto get_channel_retry;
+				else {
+					WL_ERROR(("can't get auto channel sel, err = %d, chosen = %d\n",
+						ret, chosen));
+					goto fail;
+				}
+			}
+			if ((chosen == 1) && (!rescan++))
+				goto auto_channel_retry;
+			WL_SOFTAP(("Set auto channel = %d\n", chosen));
+			ap->channel = chosen;
+			if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown))) < 0) {
+				WL_ERROR(("%s: WLC_DOWN failed, err=%d\n", __FUNCTION__, res));
+				goto fail;
+			}
+#ifndef AP_ONLY
+	if (!res)
+		last_auto_channel = ap->channel;
+#endif
+
+fail :
+	return res;
+}
+
+
+static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap)
+{
+	int updown = 0;
+	int channel = 0;
+
+	wlc_ssid_t ap_ssid;
+	int max_assoc = 8;
+
+	int res = 0;
+	int apsta_var = 0;
+#ifndef AP_ONLY
+	int mpc = 0;
+	int iolen = 0;
+	int mkvar_err = 0;
+	int bsscfg_index = 1;
+	char buf[WLC_IOCTL_SMLEN];
+#endif
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	net_os_wake_lock(dev);
+
+	WL_SOFTAP(("wl_iw: set ap profile:\n"));
+	WL_SOFTAP(("	ssid = '%s'\n", ap->ssid));
+	WL_SOFTAP(("	security = '%s'\n", ap->sec));
+	if (ap->key[0] != '\0')
+		WL_SOFTAP(("	key = '%s'\n", ap->key));
+	WL_SOFTAP(("	channel = %d\n", ap->channel));
+	WL_SOFTAP(("	max scb = %d\n", ap->max_scb));
+
+#ifdef AP_ONLY
+	if (ap_cfg_running) {
+		wl_iw_softap_deassoc_stations(dev, NULL);
+		ap_cfg_running = FALSE;
+	}
+#endif
+
+	if (ap_cfg_running == FALSE) {
+
+#ifndef AP_ONLY
+		sema_init(&ap_eth_sema, 0);
+
+		mpc = 0;
+		if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) {
+			WL_ERROR(("%s fail to set mpc\n", __FUNCTION__));
+			goto fail;
+		}
+#endif
+
+		updown = 0;
+		if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown)))) {
+			WL_ERROR(("%s: WLC_DOWN failed\n", __FUNCTION__));
+			goto fail;
+		}
+
+#ifdef AP_ONLY
+		apsta_var = 0;
+		if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+			WL_ERROR(("%s fail to set apsta_var 0\n", __FUNCTION__));
+			goto fail;
+		}
+		apsta_var = 1;
+		if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+			WL_ERROR(("%s fail to set apsta_var 1\n", __FUNCTION__));
+			goto fail;
+		}
+		res = dev_wlc_ioctl(dev, WLC_GET_AP, &apsta_var, sizeof(apsta_var));
+#else
+		apsta_var = 1;
+		iolen = wl_bssiovar_mkbuf("apsta",
+			bsscfg_index,  &apsta_var, sizeof(apsta_var)+4,
+			buf, sizeof(buf), &mkvar_err);
+
+		if (iolen <= 0)
+			goto fail;
+
+		if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+			WL_ERROR(("%s fail to set apsta \n", __FUNCTION__));
+			goto fail;
+		}
+		WL_TRACE(("\n>in %s: apsta set result: %d \n", __FUNCTION__, res));
+#endif
+
+		updown = 1;
+		if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown))) < 0) {
+			WL_ERROR(("%s: WLC_UP failed\n", __FUNCTION__));
+			goto fail;
+		}
+
+	} else {
+		
+		if (!ap_net_dev) {
+			WL_ERROR(("%s: ap_net_dev is null\n", __FUNCTION__));
+			goto fail;
+		}
+
+		res = wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+
+		
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+			WL_ERROR(("%s fail to set bss down\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+
+	if (strlen(ap->country_code)) {
+		WL_ERROR(("%s: country_code ignored; the country must be set "
+			"with the COUNTRY command\n", __FUNCTION__));
+	} else {
+		WL_SOFTAP(("%s: Country code is not specified,"
+			" will use Radio's default\n",
+			__FUNCTION__));
+	}
+
+	iolen = wl_bssiovar_mkbuf("closednet",
+		bsscfg_index,  &ap->closednet, sizeof(ap->closednet)+4,
+		buf, sizeof(buf), &mkvar_err);
+	ASSERT(iolen);
+	if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+		WL_ERROR(("%s: failed to set 'closednet' for apsta\n", __FUNCTION__));
+		goto fail;
+	}
+
+	
+	if ((ap->channel == 0) && (get_softap_auto_channel(dev, ap) < 0)) {
+		ap->channel = 1;
+		WL_ERROR(("%s: auto channel failed, falling back to channel=%d\n", \
+			__FUNCTION__, ap->channel));
+	}
+
+	channel = ap->channel;
+	if ((res = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel)))) {
+			WL_ERROR(("%s fail to set channel\n", __FUNCTION__));
+			goto fail;
+	}
+
+	if (ap_cfg_running == FALSE) {
+		updown = 0;
+		if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)))) {
+			WL_ERROR(("%s fail to set up\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+
+	max_assoc = ap->max_scb;
+	if ((res = dev_wlc_intvar_set(dev, "maxassoc", max_assoc))) {
+			WL_ERROR(("%s fail to set maxassoc\n", __FUNCTION__));
+			goto fail;
+	}
+
+	ap_ssid.SSID_len = strlen(ap->ssid);
+	strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+
+#ifdef AP_ONLY
+	if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+		WL_ERROR(("ERROR:%d in:%s, wl_iw_set_ap_security is skipped\n", \
+		res, __FUNCTION__));
+		goto fail;
+	}
+	wl_iw_send_priv_event(dev, "ASCII_CMD=AP_BSS_START");
+	ap_cfg_running = TRUE;
+#else
+	iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&ap_ssid),
+		ap_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+	ASSERT(iolen);
+	if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) != 0) {
+		WL_ERROR(("ERROR:%d in:%s, Security & BSS reconfiguration is skipped\n", \
+		res, __FUNCTION__));
+		goto fail;
+	}
+	if (ap_cfg_running == FALSE) {
+		init_completion(&ap_cfg_exited);
+		ap_cfg_pid = kernel_thread(thr_wait_for_2nd_eth_dev, dev, 0);
+	} else {
+		ap_cfg_pid = -1;
+		if (ap_net_dev == NULL) {
+			WL_ERROR(("%s ERROR: ap_net_dev is NULL !!!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		WL_ERROR(("%s: %s Configure security & restart AP bss \n", \
+			 __FUNCTION__, ap_net_dev->name));
+
+		if ((res = wl_iw_set_ap_security(ap_net_dev, &my_ap)) < 0) {
+			WL_ERROR(("%s fail to set security : %d\n", __FUNCTION__, res));
+			goto fail;
+		}
+
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) {
+			WL_ERROR(("%s fail to set bss up\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+#endif 
+fail:
+	WL_SOFTAP(("%s exit with %d\n", __FUNCTION__, res));
+
+	net_os_wake_unlock(dev);
+
+	return res;
+}
+
+
+static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap)
+{
+	int wsec = 0;
+	int wpa_auth = 0;
+	int res = 0;
+	int i;
+	char *ptr;
+#ifdef AP_ONLY
+	int mpc = 0;
+	wlc_ssid_t ap_ssid;
+#endif
+	wl_wsec_key_t key;
+
+	WL_SOFTAP(("\nsetting SOFTAP security mode:\n"));
+	WL_SOFTAP(("wl_iw: set ap profile:\n"));
+	WL_SOFTAP(("	ssid = '%s'\n", ap->ssid));
+	WL_SOFTAP(("	security = '%s'\n", ap->sec));
+	if (ap->key[0] != '\0') {
+		WL_SOFTAP(("	key = '%s'\n", ap->key));
+	}
+	WL_SOFTAP(("	channel = %d\n", ap->channel));
+	WL_SOFTAP(("	max scb = %d\n", ap->max_scb));
+
+	if (strnicmp(ap->sec, "open", strlen("open")) == 0) {
+		wsec = 0;
+		res = dev_wlc_intvar_set(dev, "wsec", wsec);
+		wpa_auth = WPA_AUTH_DISABLED;
+		res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & wpa_auth set to 'OPEN', result: %d\n", res));
+		WL_SOFTAP(("=====================\n"));
+
+	} else if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+
+		memset(&key, 0, sizeof(key));
+
+		wsec = WEP_ENABLED;
+		res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+		key.index = 0;
+		if (wl_iw_parse_wep(ap->key, &key)) {
+			WL_SOFTAP(("wep key parse err!\n"));
+			return -1;
+		}
+
+		key.index = htod32(key.index);
+		key.len = htod32(key.len);
+		key.algo = htod32(key.algo);
+		key.flags = htod32(key.flags);
+
+		res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+
+		wpa_auth = WPA_AUTH_DISABLED;
+		res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & auth set to 'WEP', result: %d\n", res));
+		WL_SOFTAP(("=====================\n"));
+
+	} else if (strnicmp(ap->sec, "wpa2-psk", strlen("wpa2-psk")) == 0) {
+		wsec_pmk_t psk;
+		size_t key_len;
+
+		wsec = AES_ENABLED;
+		dev_wlc_intvar_set(dev, "wsec", wsec);
+
+		key_len = strlen(ap->key);
+		if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+			WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+			WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+			return -1;
+		}
+
+		if (key_len < WSEC_MAX_PSK_LEN) {
+			unsigned char output[2*SHA1HashSize];
+			char key_str_buf[WSEC_MAX_PSK_LEN+1];
+
+			memset(output, 0, sizeof(output));
+			pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+
+			ptr = key_str_buf;
+			for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+				sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4], \
+					 (uint)output[i*4+1], (uint)output[i*4+2], \
+					 (uint)output[i*4+3]);
+				ptr += 8;
+			}
+			WL_SOFTAP(("%s: passphase = %s\n", __FUNCTION__, key_str_buf));
+
+			psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+			memcpy(psk.key, key_str_buf, psk.key_len);
+		} else {
+			psk.key_len = htod16((ushort) key_len);
+			memcpy(psk.key, ap->key, key_len);
+		}
+		psk.flags = htod16(WSEC_PASSPHRASE);
+		dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+		wpa_auth = WPA2_AUTH_PSK;
+		dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+	} else if (strnicmp(ap->sec, "wpa-psk", strlen("wpa-psk")) == 0) {
+
+		wsec_pmk_t psk;
+		size_t key_len;
+
+		wsec = TKIP_ENABLED;
+		res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+		key_len = strlen(ap->key);
+		if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+			WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+			WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+			return -1;
+		}
+
+		if (key_len < WSEC_MAX_PSK_LEN) {
+			unsigned char output[2*SHA1HashSize];
+			char key_str_buf[WSEC_MAX_PSK_LEN+1];
+			bzero(output, 2*SHA1HashSize);
+
+			WL_SOFTAP(("%s: do passhash...\n", __FUNCTION__));
+
+			pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+
+			ptr = key_str_buf;
+			for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+				WL_SOFTAP(("[%02d]: %08x\n", i, *((unsigned int *)&output[i*4])));
+
+				sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4],
+					(uint)output[i*4+1], (uint)output[i*4+2],
+					(uint)output[i*4+3]);
+				ptr += 8;
+			}
+			WL_SOFTAP(("%s: passphase = %s\n", __FUNCTION__, key_str_buf));
+
+			psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+			memcpy(psk.key, key_str_buf, psk.key_len);
+		} else {
+			psk.key_len = htod16((ushort) key_len);
+			memcpy(psk.key, ap->key, key_len);
+		}
+
+		psk.flags = htod16(WSEC_PASSPHRASE);
+		res |= dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+		wpa_auth = WPA_AUTH_PSK;
+		res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP((" wsec & auth set 'wpa-psk' (TKIP), result:&d %d\n", res));
+	}
+
+#ifdef AP_ONLY
+		ap_ssid.SSID_len = strlen(ap->ssid);
+		strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+		res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &ap_ssid, sizeof(ap_ssid));
+		mpc = 0;
+		res |= dev_wlc_intvar_set(dev, "mpc", mpc);
+		if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+			res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		}
+#endif
+	return res;
+}
+
+
+
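+/*
+ * Extract the value following "token" from a "TOKEN=value," style command
+ * string and store it in dst as a decimal integer, hex byte buffer or plain
+ * string, depending on param_type. *str_ptr is advanced past the consumed
+ * parameter. Returns 0 on success, -1 if the token is not found.
+ */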
+int get_parmeter_from_string(
+			char **str_ptr, const char *token,
+			int param_type, void  *dst, int param_max_len)
+{
+	char int_str[7] = "0";
+	int parm_str_len;
+	char  *param_str_begin;
+	char  *param_str_end;
+
+	if ((*str_ptr) && !strncmp(*str_ptr, token, strlen(token))) {
+
+		strsep(str_ptr, "=,");
+		param_str_begin = *str_ptr;
+		strsep(str_ptr, "=,");
+
+		if (*str_ptr == NULL) {
+			parm_str_len = strlen(param_str_begin);
+		} else {
+			param_str_end = *str_ptr-1;
+			parm_str_len = param_str_end - param_str_begin;
+		}
+
+		WL_TRACE((" 'token:%s', len:%d, ", token, parm_str_len));
+
+		if (parm_str_len > param_max_len) {
+			WL_TRACE((" WARNING: extracted param len:%d is > MAX:%d\n",
+				parm_str_len, param_max_len));
+
+			parm_str_len = param_max_len;
+		}
+
+		switch (param_type) {
+
+		case PTYPE_INTDEC: {
+			int *pdst_int = dst;
+			char *eptr;
+
+			if (parm_str_len > sizeof(int_str))
+				 parm_str_len = sizeof(int_str);
+
+			memcpy(int_str, param_str_begin, parm_str_len);
+
+			*pdst_int = simple_strtoul(int_str, &eptr, 10);
+
+			WL_TRACE((" written as integer:%d\n",  *pdst_int));
+			}
+			break;
+		case PTYPE_STR_HEX: {
+			u8 *buf = dst;
+
+			param_max_len = param_max_len >> 1;
+			hstr_2_buf(param_str_begin, buf, param_max_len);
+			print_buf(buf, param_max_len, 0);
+			}
+			break;
+		default:
+			memcpy(dst, param_str_begin, parm_str_len);
+			*((char *)dst + parm_str_len) = 0;
+			WL_TRACE((" written as a string:%s\n", (char *)dst));
+			break;
+		}
+
+		return 0;
+	} else {
+		WL_ERROR(("\n %s: No token:%s in str:%s\n",
+			__FUNCTION__, token, *str_ptr));
+
+		return -1;
+	}
+}
+
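+/* Deauthenticate one associated STA (by MAC address) or, when mac is NULL,
+ * every entry returned by WLC_GET_ASSOCLIST. */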
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac)
+{
+	int i;
+	int res = 0;
+	char mac_buf[128] = {0};
+	char z_mac[6] = {0, 0, 0, 0, 0, 0};
+	char *sta_mac;
+	struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+	bool deauth_all = false;
+
+	if (mac == NULL) {
+		deauth_all = true;
+		sta_mac = z_mac;
+	} else {
+		sta_mac = mac;
+	}
+
+	memset(assoc_maclist, 0, sizeof(mac_buf));
+	assoc_maclist->count = 8;
+
+	res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 128);
+	if (res != 0) {
+		WL_SOFTAP(("%s: Error:%d Couldn't get ASSOC List\n", __FUNCTION__, res));
+		return res;
+	}
+
+	if (assoc_maclist->count) {
+		for (i = 0; i < assoc_maclist->count; i++) {
+			scb_val_t scbval;
+
+			scbval.val = htod32(1);
+			bcopy(&assoc_maclist->ea[i], &scbval.ea, ETHER_ADDR_LEN);
+
+			if (deauth_all || (memcmp(&scbval.ea, sta_mac, ETHER_ADDR_LEN) == 0)) {
+				WL_SOFTAP(("%s, deauth STA:%d \n", __FUNCTION__, i));
+				res |= dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+					&scbval, sizeof(scb_val_t));
+			}
+		}
+	} else {
+		WL_SOFTAP((" STA ASSOC list is empty\n"));
+	}
+
+	if (res != 0) {
+		WL_ERROR(("%s: Error:%d\n", __FUNCTION__, res));
+	} else if (assoc_maclist->count) {
+		bcm_mdelay(200);
+	}
+	return res;
+}
+
+
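+/* iwpriv AP_BSS_STOP handler: deauthenticate all associated STAs and, unless
+ * built with AP_ONLY, delete the AP BSS via dev_iw_write_cfg1_bss_var(dev, 2). */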
+static int iwpriv_softap_stop(struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *ext)
+{
+	int res = 0;
+
+	WL_SOFTAP(("got iwpriv AP_BSS_STOP\n"));
+
+	if ((!dev) && (!ap_net_dev)) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return res;
+	}
+
+	net_os_wake_lock(dev);
+
+	if ((ap_cfg_running == TRUE)) {
+#ifdef AP_ONLY
+		wl_iw_softap_deassoc_stations(dev, NULL);
+#else
+		wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 2)) < 0)
+			WL_ERROR(("%s failed to del BSS err = %d", __FUNCTION__, res));
+#endif
+
+		bcm_mdelay(100);
+
+		wrqu->data.length = 0;
+		ap_cfg_running = FALSE;
+	}
+	else
+		WL_ERROR(("%s: was called when SoftAP is OFF : move on\n", __FUNCTION__));
+
+	WL_SOFTAP(("%s Done with %d\n", __FUNCTION__, res));
+
+	net_os_wake_unlock(dev);
+
+	return res;
+}
+
+
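+/* iwpriv WL_FW_RELOAD handler: copy the FW_PATH= value from user space into
+ * fw_path and record whether the new image is the apsta (SoftAP capable)
+ * firmware or the plain STA firmware. */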
+static int iwpriv_fw_reload(struct net_device *dev,
+		struct iw_request_info *info,
+		union iwreq_data *wrqu,
+		char *ext)
+{
+	int ret = -1;
+	char extra[256];
+	char *fwstr = fw_path;
+
+	WL_SOFTAP(("current firmware_path[]=%s\n", fwstr));
+
+	WL_TRACE((">Got FW_RELOAD cmd:"
+				"info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d, \
+				fw_path:%p, len:%d \n",
+				info->cmd, info->flags,
+				wrqu->data.pointer, wrqu->data.length, fwstr, strlen(fwstr)));
+
+	if ((wrqu->data.length > 4) && (wrqu->data.length < sizeof(extra))) {
+
+		char *str_ptr;
+
+		if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+			ret = -EFAULT;
+			goto exit_proc;
+		}
+
+		extra[wrqu->data.length] = '\0';	/* NUL-terminate before parsing */
+		str_ptr = extra;
+
+		if (get_parmeter_from_string(&str_ptr, "FW_PATH=", PTYPE_STRING, fwstr, 255) != 0) {
+			WL_ERROR(("Error: extracting FW_PATH='' string\n"));
+			goto exit_proc;
+		}
+
+		if (strstr(fwstr, "apsta") != NULL) {
+			WL_SOFTAP(("GOT APSTA FIRMWARE\n"));
+			ap_fw_loaded = TRUE;
+		} else {
+			WL_SOFTAP(("GOT STA FIRMWARE\n"));
+			ap_fw_loaded = FALSE;
+		}
+
+		WL_SOFTAP(("SET firmware_path[]=%s , str_p:%p\n", fwstr, fwstr));
+		ret = 0;
+	} else {
+		WL_ERROR(("Error: ivalid param len:%d\n", wrqu->data.length));
+	}
+
+exit_proc:
+	return ret;
+}
+#endif
+
+#ifdef SOFTAP
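+/* Loopback test helper: echo the user-supplied string back to the
+ * supplicant as a private driver event. */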
+static int iwpriv_wpasupp_loop_tst(struct net_device *dev,
+		struct iw_request_info *info,
+		union iwreq_data *wrqu,
+		char *ext)
+{
+	int res = 0;
+	char  *params = NULL;
+
+	WL_TRACE((">Got IWPRIV  wp_supp loopback cmd test:"
+				"info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+				info->cmd, info->flags,
+				wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		if (!(params = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(params, wrqu->data.pointer, wrqu->data.length)) {
+			kfree(params);
+			return -EFAULT;
+		}
+
+		params[wrqu->data.length] = 0;
+		WL_SOFTAP(("\n>> copied from user:\n %s\n", params));
+	} else {
+		WL_ERROR(("ERROR param length is 0\n"));
+		return -EFAULT;
+	}
+
+	res = wl_iw_send_priv_event(dev, params);
+	kfree(params);
+
+	return res;
+}
+#endif
+
+
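+/* Bring the AP BSS up: in the non-AP_ONLY build, wait for the secondary
+ * interface thread to exit, apply the stored security profile (my_ap) and
+ * then enable the BSS. */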
+static int
+iwpriv_en_ap_bss(
+		struct net_device *dev,
+		struct iw_request_info *info,
+		void *wrqu,
+		char *extra)
+{
+	int res = 0;
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	net_os_wake_lock(dev);
+
+	WL_SOFTAP(("%s: rcvd IWPRIV IOCTL:  for dev:%s\n", __FUNCTION__, dev->name));
+
+#ifndef AP_ONLY
+	if (ap_cfg_pid >= 0) {
+		wait_for_completion(&ap_cfg_exited);
+		ap_cfg_pid = -1;
+	}
+
+	if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+		WL_ERROR((" %s ERROR setting SOFTAP security in :%d\n", __FUNCTION__, res));
+	}
+	else {
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0)
+			WL_ERROR(("%s fail to set bss up err=%d\n", __FUNCTION__, res));
+		else
+			bcm_mdelay(100);
+	}
+
+#endif 
+	WL_SOFTAP(("%s done with res %d \n", __FUNCTION__, res));
+
+	net_os_wake_unlock(dev);
+
+	return res;
+}
+
+static int
+get_assoc_sta_list(struct net_device *dev, char *buf, int len)
+{
+	WL_TRACE(("%s: dev_wlc_ioctl(dev:%p, cmd:%d, buf:%p, len:%d)\n",
+		__FUNCTION__, dev, WLC_GET_ASSOCLIST, buf, len));
+
+	return dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, buf, len);
+
+}
+
+
+void check_error(int res, const char *msg, const char *func, int line)
+{
+	if (res != 0)
+		WL_ERROR(("%s, %d function:%s, line:%d\n", msg, res, func, line));
+}
+
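+/*
+ * Program the AP MAC filter: set the WLC MAC mode (disabled/deny/allow),
+ * push the supplied MAC list, then walk the current association list and
+ * deauthenticate any STA that no longer passes the filter.
+ */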
+static int
+set_ap_mac_list(struct net_device *dev, void *buf)
+{
+	struct mac_list_set *mac_list_set = (struct mac_list_set *)buf;
+	struct maclist *maclist = (struct maclist *)&mac_list_set->mac_list;
+	int length;
+	int i;
+	int mac_mode = mac_list_set->mode;
+	int ioc_res = 0;
+	ap_macmode = mac_list_set->mode;
+
+	bzero(&ap_black_list, sizeof(struct mflist));
+
+	if (mac_mode == MACLIST_MODE_DISABLED) {
+
+		ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+		check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+		WL_SOFTAP(("%s: MAC filtering disabled\n", __FUNCTION__));
+	} else {
+
+		scb_val_t scbval;
+		char mac_buf[256] = {0};
+		struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+
+		bcopy(maclist, &ap_black_list, sizeof(ap_black_list));
+
+		ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+		check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+
+		length = sizeof(maclist->count) + maclist->count*ETHER_ADDR_LEN;
+		dev_wlc_ioctl(dev, WLC_SET_MACLIST, maclist, length);
+
+		WL_SOFTAP(("%s: applied MAC List, mode:%d, length %d:\n",
+			__FUNCTION__, mac_mode, length));
+		for (i = 0; i < maclist->count; i++)
+			WL_SOFTAP(("mac %d: %02X:%02X:%02X:%02X:%02X:%02X\n",
+				i, maclist->ea[i].octet[0], maclist->ea[i].octet[1], \
+				maclist->ea[i].octet[2], \
+				maclist->ea[i].octet[3], maclist->ea[i].octet[4], \
+				maclist->ea[i].octet[5]));
+
+		assoc_maclist->count = 8;
+		ioc_res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 256);
+		check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+		WL_SOFTAP((" Cur assoc clients:%d\n", assoc_maclist->count));
+
+		if (assoc_maclist->count)
+			for (i = 0; i < assoc_maclist->count; i++) {
+				int j;
+				bool assoc_mac_matched = false;
+
+				WL_SOFTAP(("\n Cheking assoc STA: "));
+				print_buf(&assoc_maclist->ea[i], 6, 7);
+				WL_SOFTAP(("with the b/w list:"));
+
+				for (j = 0; j < maclist->count; j++)
+					if (!bcmp(&assoc_maclist->ea[i], &maclist->ea[j],
+						ETHER_ADDR_LEN)) {
+
+						assoc_mac_matched = true;
+						break;
+					}
+
+				if (((mac_mode == MACLIST_MODE_ALLOW) && !assoc_mac_matched) ||
+					((mac_mode == MACLIST_MODE_DENY) && assoc_mac_matched)) {
+
+					WL_SOFTAP(("b-match or w-mismatch,"
+								" do deauth/disassoc \n"));
+							scbval.val = htod32(1);
+							bcopy(&assoc_maclist->ea[i], &scbval.ea, \
+							ETHER_ADDR_LEN);
+							ioc_res = dev_wlc_ioctl(dev,
+								WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+								&scbval, sizeof(scb_val_t));
+							check_error(ioc_res,
+								"ioctl ERROR:",
+								__FUNCTION__, __LINE__);
+
+				} else {
+					WL_SOFTAP((" no b/w list hits, let it be\n"));
+				}
+		} else {
+			WL_SOFTAP(("No ASSOC CLIENTS\n"));
+		}
+	} 
+
+	WL_SOFTAP(("%s iocres:%d\n", __FUNCTION__, ioc_res));
+	return ioc_res;
+}
+#endif
+
+
+#ifdef SOFTAP
+int set_macfilt_from_string(struct mflist *pmflist, char **param_str)
+{
+	return 0;
+}
+#endif
+
+
+#ifdef SOFTAP
+#define PARAM_OFFSET PROFILE_OFFSET
+
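+/* Dispatch "ASCII_CMD=" private commands from the supplicant (AP_CFG,
+ * AP_BSS_START, ASSOC_LST, AP_BSS_STOP). The parameter block starts
+ * PROFILE_OFFSET bytes into cmd_str. */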
+int wl_iw_process_private_ascii_cmd(
+			struct net_device *dev,
+			struct iw_request_info *info,
+			union iwreq_data *dwrq,
+			char *cmd_str)
+{
+	int ret = 0;
+	char *sub_cmd = cmd_str + PROFILE_OFFSET + strlen("ASCII_CMD=");
+
+	WL_SOFTAP(("\n %s: ASCII_CMD: offs_0:%s, offset_32:\n'%s'\n",
+		__FUNCTION__, cmd_str, cmd_str + PROFILE_OFFSET));
+
+	if (strnicmp(sub_cmd, "AP_CFG", strlen("AP_CFG")) == 0) {
+
+		WL_SOFTAP((" AP_CFG \n"));
+
+
+		if (init_ap_profile_from_string(cmd_str+PROFILE_OFFSET, &my_ap) != 0) {
+			WL_ERROR(("ERROR: SoftAP CFG prams !\n"));
+			ret = -1;
+		} else {
+			ret = set_ap_cfg(dev, &my_ap);
+		}
+
+	} else if (strnicmp(sub_cmd, "AP_BSS_START", strlen("AP_BSS_START")) == 0) {
+
+		WL_SOFTAP(("\n SOFTAP - ENABLE BSS \n"));
+
+		WL_SOFTAP(("\n!!! got 'WL_AP_EN_BSS' from WPA supplicant, dev:%s\n", dev->name));
+
+#ifndef AP_ONLY
+		if (ap_net_dev == NULL) {
+			printf("\n ERROR: SOFTAP net_dev* is NULL !!!\n");
+		} else {
+			if ((ret = iwpriv_en_ap_bss(ap_net_dev, info, dwrq, cmd_str)) < 0)
+				WL_ERROR(("%s line %d fail to set bss up\n", \
+					__FUNCTION__, __LINE__));
+		}
+#else
+		if ((ret = iwpriv_en_ap_bss(dev, info, dwrq, cmd_str)) < 0)
+				WL_ERROR(("%s line %d fail to set bss up\n", \
+					__FUNCTION__, __LINE__));
+#endif
+	} else if (strnicmp(sub_cmd, "ASSOC_LST", strlen("ASSOC_LST")) == 0) {
+		/* no code yet */
+	} else if (strnicmp(sub_cmd, "AP_BSS_STOP", strlen("AP_BSS_STOP")) == 0) {
+		WL_SOFTAP((" \n temp DOWN SOFTAP\n"));
+#ifndef AP_ONLY
+		if ((ret = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+				WL_ERROR(("%s line %d fail to set bss down\n", \
+					__FUNCTION__, __LINE__));
+		}
+#endif
+	}
+
+	return ret;
+}
+#endif
+
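+/*
+ * SIOCSIWPRIV handler: copy the private command string from user space and
+ * dispatch it (START/STOP, SCAN-ACTIVE/SCAN-PASSIVE, RSSI, LINKSPEED,
+ * POWERMODE, PNO, CSCAN, SoftAP commands, ...). Unknown commands are
+ * acknowledged with "OK".
+ */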
+static int wl_iw_set_priv(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *ext
+)
+{
+	int ret = 0;
+	char * extra;
+
+	if (!(extra = kmalloc(dwrq->length, GFP_KERNEL)))
+	    return -ENOMEM;
+
+	if (copy_from_user(extra, dwrq->pointer, dwrq->length)) {
+	    kfree(extra);
+	    return -EFAULT;
+	}
+
+	WL_TRACE(("%s: SIOCSIWPRIV request %s, info->cmd:%x, info->flags:%d\n dwrq->length:%d",
+		dev->name, extra, info->cmd, info->flags, dwrq->length));
+
+	net_os_wake_lock(dev);
+	
+	if (dwrq->length && extra) {
+		if (strnicmp(extra, "START", strlen("START")) == 0) {
+			wl_iw_control_wl_on(dev, info);
+			WL_TRACE(("%s, Received regular START command\n", __FUNCTION__));
+		}
+
+		if (g_onoff == G_WLAN_SET_OFF) {
+			WL_TRACE(("%s, missing START, Fail\n", __FUNCTION__));
+			kfree(extra);
+			net_os_wake_unlock(dev);
+			return -EFAULT;
+		}
+
+		if (strnicmp(extra, "SCAN-ACTIVE", strlen("SCAN-ACTIVE")) == 0) {
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+			WL_TRACE(("%s: active scan setting suppressed\n", dev->name));
+#else
+			ret = wl_iw_set_active_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+		} else if (strnicmp(extra, "SCAN-PASSIVE", strlen("SCAN-PASSIVE")) == 0)
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+			WL_TRACE(("%s: passive scan setting suppressed\n", dev->name));
+#else
+			ret = wl_iw_set_passive_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+		else if (strnicmp(extra, "RSSI", strlen("RSSI")) == 0)
+			ret = wl_iw_get_rssi(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, "LINKSPEED", strlen("LINKSPEED")) == 0)
+			ret = wl_iw_get_link_speed(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, "MACADDR", strlen("MACADDR")) == 0)
+			ret = wl_iw_get_macaddr(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, "COUNTRY", strlen("COUNTRY")) == 0)
+			ret = wl_iw_set_country(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, "STOP", strlen("STOP")) == 0)
+			ret = wl_iw_control_wl_off(dev, info);
+		else if (strnicmp(extra, BAND_GET_CMD, strlen(BAND_GET_CMD)) == 0)
+			ret = wl_iw_get_band(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, BAND_SET_CMD, strlen(BAND_SET_CMD)) == 0)
+			ret = wl_iw_set_band(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, DTIM_SKIP_GET_CMD, strlen(DTIM_SKIP_GET_CMD)) == 0)
+			ret = wl_iw_get_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, DTIM_SKIP_SET_CMD, strlen(DTIM_SKIP_SET_CMD)) == 0)
+			ret = wl_iw_set_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, SETSUSPEND_CMD, strlen(SETSUSPEND_CMD)) == 0)
+			ret = wl_iw_set_suspend(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, TXPOWER_SET_CMD, strlen(TXPOWER_SET_CMD)) == 0)
+			ret = wl_iw_set_txpower(dev, info, (union iwreq_data *)dwrq, extra);
+#if defined(PNO_SUPPORT)
+		else if (strnicmp(extra, PNOSSIDCLR_SET_CMD, strlen(PNOSSIDCLR_SET_CMD)) == 0)
+			ret = wl_iw_set_pno_reset(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, PNOSETUP_SET_CMD, strlen(PNOSETUP_SET_CMD)) == 0)
+			ret = wl_iw_set_pno_set(dev, info, (union iwreq_data *)dwrq, extra);
+		else if (strnicmp(extra, PNOENABLE_SET_CMD, strlen(PNOENABLE_SET_CMD)) == 0)
+			ret = wl_iw_set_pno_enable(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+#if defined(CSCAN)
+	    else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0)
+			ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif 
+#ifdef CUSTOMER_HW2
+		else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0)
+			ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0) {
+			WL_TRACE_COEX(("%s:got Framwrork cmd: 'BTCOEXMODE'\n", __FUNCTION__));
+			ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra);
+	    }
+#else
+		else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0)
+			ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra);
+#endif
+		else if (strnicmp(extra, "GETPOWER", strlen("GETPOWER")) == 0)
+			ret = wl_iw_get_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+#ifdef SOFTAP
+#ifdef SOFTAP_TLV_CFG
+		else if (strnicmp(extra, SOFTAP_SET_CMD, strlen(SOFTAP_SET_CMD)) == 0) {
+		    wl_iw_softap_cfg_tlv(dev, info, (union iwreq_data *)dwrq, extra);
+	    }
+#endif
+		else if (strnicmp(extra, "ASCII_CMD", strlen("ASCII_CMD")) == 0) {
+			wl_iw_process_private_ascii_cmd(dev, info, (union iwreq_data *)dwrq, extra);
+		} else if (strnicmp(extra, "AP_MAC_LIST_SET", strlen("AP_MAC_LIST_SET")) == 0) {
+			WL_SOFTAP(("penguin, set AP_MAC_LIST_SET\n"));
+			set_ap_mac_list(dev, (extra + PROFILE_OFFSET));
+		}
+#endif
+		else {
+			WL_TRACE(("Unknown PRIVATE command: %s: ignored\n", extra));
+			snprintf(extra, MAX_WX_STRING, "OK");
+			dwrq->length = strlen("OK") + 1;
+		}
+	}
+
+	net_os_wake_unlock(dev);
+
+	if (extra) {
+		if (copy_to_user(dwrq->pointer, extra, dwrq->length)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		kfree(extra);
+	}
+
+	return ret;
+}
+
+static const iw_handler wl_iw_handler[] =
+{
+	(iw_handler) wl_iw_config_commit,	
+	(iw_handler) wl_iw_get_name,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_freq,		
+	(iw_handler) wl_iw_get_freq,		
+	(iw_handler) wl_iw_set_mode,		
+	(iw_handler) wl_iw_get_mode,		
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) wl_iw_get_range,
+	(iw_handler) wl_iw_set_priv,
+	(iw_handler) NULL,
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_spy,		
+	(iw_handler) wl_iw_get_spy,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_wap,		
+	(iw_handler) wl_iw_get_wap,		
+#if WIRELESS_EXT > 17
+	(iw_handler) wl_iw_mlme,		
+#else
+	(iw_handler) NULL,			
+#endif
+#if defined(WL_IW_USE_ISCAN)
+	(iw_handler) wl_iw_iscan_get_aplist,	
+#else
+	(iw_handler) wl_iw_get_aplist,		
+#endif 
+#if WIRELESS_EXT > 13
+#if defined(WL_IW_USE_ISCAN)
+	(iw_handler) wl_iw_iscan_set_scan,	
+	(iw_handler) wl_iw_iscan_get_scan,	
+#else
+	(iw_handler) wl_iw_set_scan,		
+	(iw_handler) wl_iw_get_scan,		
+#endif
+#else	
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+#endif	
+	(iw_handler) wl_iw_set_essid,		
+	(iw_handler) wl_iw_get_essid,		
+	(iw_handler) wl_iw_set_nick,
+	(iw_handler) wl_iw_get_nick,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_rate,		
+	(iw_handler) wl_iw_get_rate,		
+	(iw_handler) wl_iw_set_rts,		
+	(iw_handler) wl_iw_get_rts,		
+	(iw_handler) wl_iw_set_frag,		
+	(iw_handler) wl_iw_get_frag,		
+	(iw_handler) wl_iw_set_txpow,		
+	(iw_handler) wl_iw_get_txpow,		
+#if WIRELESS_EXT > 10
+	(iw_handler) wl_iw_set_retry,		
+	(iw_handler) wl_iw_get_retry,		
+#endif 
+	(iw_handler) wl_iw_set_encode,		
+	(iw_handler) wl_iw_get_encode,		
+	(iw_handler) wl_iw_set_power,		
+	(iw_handler) wl_iw_get_power,		
+#if WIRELESS_EXT > 17
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_wpaie,		
+	(iw_handler) wl_iw_get_wpaie,		
+	(iw_handler) wl_iw_set_wpaauth,		
+	(iw_handler) wl_iw_get_wpaauth,		
+	(iw_handler) wl_iw_set_encodeext,	
+	(iw_handler) wl_iw_get_encodeext,	
+#ifdef BCMWPA2
+	(iw_handler) wl_iw_set_pmksa,			
+#endif
+#endif 
+};
+
+#if WIRELESS_EXT > 12
+static const iw_handler wl_iw_priv_handler[] = {
+	NULL,
+	(iw_handler)wl_iw_set_active_scan,
+	NULL,
+	(iw_handler)wl_iw_get_rssi,
+	NULL,
+	(iw_handler)wl_iw_set_passive_scan,
+	NULL,
+	(iw_handler)wl_iw_get_link_speed,
+	NULL,
+	(iw_handler)wl_iw_get_macaddr,
+	NULL,
+	(iw_handler)wl_iw_control_wl_off,
+	NULL,
+	(iw_handler)wl_iw_control_wl_on,
+#ifdef SOFTAP
+	NULL,
+	(iw_handler)iwpriv_set_ap_config,
+
+	NULL,
+	(iw_handler)iwpriv_get_assoc_list,
+
+	NULL,
+	(iw_handler)iwpriv_set_mac_filters,
+
+	NULL,
+	(iw_handler)iwpriv_en_ap_bss,
+
+	NULL,
+	(iw_handler)iwpriv_wpasupp_loop_tst,
+
+	NULL,
+	(iw_handler)iwpriv_softap_stop,
+
+	NULL,
+	(iw_handler)iwpriv_fw_reload,
+
+	NULL,
+	(iw_handler)iwpriv_set_ap_sta_disassoc,
+#endif
+#if defined(CSCAN)
+
+	NULL,
+	(iw_handler)iwpriv_set_cscan
+#endif 	
+};
+
+static const struct iw_priv_args wl_iw_priv_args[] = {
+	{
+		WL_IW_SET_ACTIVE_SCAN,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"SCAN-ACTIVE"
+	},
+	{
+		WL_IW_GET_RSSI,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"RSSI"
+	},
+	{
+		WL_IW_SET_PASSIVE_SCAN,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"SCAN-PASSIVE"
+	},
+	{
+		WL_IW_GET_LINK_SPEED,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"LINKSPEED"
+	},
+	{
+		WL_IW_GET_CURR_MACADDR,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"Macaddr"
+	},
+	{
+		WL_IW_SET_STOP,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"STOP"
+	},
+	{
+		WL_IW_SET_START,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"START"
+	},
+
+#ifdef SOFTAP
+	{
+		WL_SET_AP_CFG,
+		IW_PRIV_TYPE_CHAR |  256,
+		0,
+		"AP_SET_CFG"
+	},
+
+	{
+		WL_AP_STA_LIST,
+		IW_PRIV_TYPE_CHAR | 0,
+		IW_PRIV_TYPE_CHAR | 1024,
+		"AP_GET_STA_LIST"
+	},
+
+	{
+		WL_AP_MAC_FLTR,
+		IW_PRIV_TYPE_CHAR | 256,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+		"AP_SET_MAC_FLTR"
+	},
+
+	{
+		WL_AP_BSS_START,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"AP_BSS_START"
+	},
+
+	{
+		AP_LPB_CMD,
+		IW_PRIV_TYPE_CHAR | 256,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+		"AP_LPB_CMD"
+	},
+
+	{
+		WL_AP_STOP,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+		"AP_BSS_STOP"
+	},
+
+	{
+		WL_FW_RELOAD,
+		IW_PRIV_TYPE_CHAR | 256,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+		"WL_FW_RELOAD"
+	},
+
+	{
+		WL_AP_STA_DISASSOC,
+		IW_PRIV_TYPE_CHAR | 256,
+		IW_PRIV_TYPE_CHAR | 0,
+		"AP_STA_DISASSOC"
+	},
+#endif
+#if defined(CSCAN)
+	{
+		WL_COMBO_SCAN,
+		IW_PRIV_TYPE_CHAR | 1024,
+		0,
+		"CSCAN"
+	},
+#endif
+};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+	.num_standard = ARRAYSIZE(wl_iw_handler),
+	.standard = (iw_handler *) wl_iw_handler,
+	.num_private = ARRAYSIZE(wl_iw_priv_handler),
+	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+	.private = (iw_handler *)wl_iw_priv_handler,
+	.private_args = (void *) wl_iw_priv_args,
+
+#if WIRELESS_EXT >= 19
+	get_wireless_stats: dhd_get_wireless_stats,
+#endif 
+};
+#endif 
+
+
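+/*
+ * Wireless Extensions ioctl entry point, called from dhd's do_ioctl hook:
+ * validate the command index, size and copy in any user buffer, invoke the
+ * matching handler from wl_iw_handler[] and copy the result back out.
+ */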
+int wl_iw_ioctl(
+	struct net_device *dev,
+	struct ifreq *rq,
+	int cmd
+)
+{
+	struct iwreq *wrq = (struct iwreq *) rq;
+	struct iw_request_info info;
+	iw_handler handler;
+	char *extra = NULL;
+	int token_size = 1, max_tokens = 0, ret = 0;
+
+	net_os_wake_lock(dev);
+
+	WL_TRACE(("%s: cmd:%x alled via dhd->do_ioctl()entry point\n", __FUNCTION__, cmd));
+	if (cmd < SIOCIWFIRST ||
+		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) {
+			WL_ERROR(("%s: error in cmd=%x : not supported\n", __FUNCTION__, cmd));
+			net_os_wake_unlock(dev);
+			return -EOPNOTSUPP;
+	}
+
+	switch (cmd) {
+
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		max_tokens = IW_ESSID_MAX_SIZE + 1;
+		break;
+
+	case SIOCSIWENCODE:
+	case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+	case SIOCSIWENCODEEXT:
+	case SIOCGIWENCODEEXT:
+#endif
+		max_tokens = wrq->u.data.length;
+		break;
+
+	case SIOCGIWRANGE:
+		max_tokens = sizeof(struct iw_range) + 500;
+		break;
+
+	case SIOCGIWAPLIST:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_AP;
+		break;
+
+#if WIRELESS_EXT > 13
+	case SIOCGIWSCAN:
+#if defined(WL_IW_USE_ISCAN)
+	if (g_iscan)
+		max_tokens = wrq->u.data.length;
+	else
+#endif
+		max_tokens = IW_SCAN_MAX_DATA;
+		break;
+#endif 
+
+	case SIOCSIWSPY:
+		token_size = sizeof(struct sockaddr);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+	case SIOCGIWSPY:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+#if WIRELESS_EXT > 17
+	case SIOCSIWPMKSA:
+	case SIOCSIWGENIE:
+#endif 
+	case SIOCSIWPRIV:
+		max_tokens = wrq->u.data.length;
+		break;
+	}
+
+	if (max_tokens && wrq->u.data.pointer) {
+		if (wrq->u.data.length > max_tokens) {
+			WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d  > max_tokens=%d\n", \
+				__FUNCTION__, cmd, wrq->u.data.length, max_tokens));
+			ret = -E2BIG;
+			goto wl_iw_ioctl_done;
+		}
+		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) {
+			ret = -ENOMEM;
+			goto wl_iw_ioctl_done;
+		}
+
+		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			ret = -EFAULT;
+			goto wl_iw_ioctl_done;
+		}
+	}
+
+	info.cmd = cmd;
+	info.flags = 0;
+
+	ret = handler(dev, &info, &wrq->u, extra);
+
+	if (extra) {
+		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			ret = -EFAULT;
+			goto wl_iw_ioctl_done;
+		}
+
+		kfree(extra);
+	}
+
+wl_iw_ioctl_done:
+
+	net_os_wake_unlock(dev);
+
+	return ret;
+}
+
+
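+/* Map a connection-related (event, status, reason) triple to a short
+ * "Conn"/"Sup" status string for user space; returns TRUE if the event
+ * matched an entry in the table. */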
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen)
+{
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			
+		uint32 inStatus;		
+		uint32 inReason;		
+		const char* outName;	
+		const char* outCause;	
+	} conn_fail_event_map_t;
+
+	
+#	define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map [] = {
+		
+		
+		{WLC_E_SET_SSID,     WLC_E_STATUS_SUCCESS,   WL_IW_DONT_CARE,
+		"Conn", "Success"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+		"Conn", "NoNetworks"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ConfigMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_PRUNE_ENCR_MISMATCH,
+		"Conn", "EncrypMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_RSN_MISMATCH,
+		"Conn", "RsnMismatch"},
+		{WLC_E_AUTH,         WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "AuthTimeout"},
+		{WLC_E_AUTH,         WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "AuthFail"},
+		{WLC_E_AUTH,         WLC_E_STATUS_NO_ACK,    WL_IW_DONT_CARE,
+		"Conn", "AuthNoAck"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ReassocFail"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "ReassocTimeout"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_ABORT,     WL_IW_DONT_CARE,
+		"Conn", "ReassocAbort"},
+		{WLC_E_PSK_SUP,      WLC_SUP_KEYED,          WL_IW_DONT_CARE,
+		"Sup", "ConnSuccess"},
+		{WLC_E_PSK_SUP,      WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Sup", "WpaHandshakeFail"},
+		{WLC_E_DEAUTH_IND,   WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Deauth"},
+		{WLC_E_DISASSOC_IND, WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "DisassocInd"},
+		{WLC_E_DISASSOC,     WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Disassoc"}
+	};
+
+	const char* name = "";
+	const char* cause = NULL;
+	int i;
+
+	
+	for (i = 0;  i < sizeof(event_map)/sizeof(event_map[0]);  i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			name = row->outName;
+			cause = row->outCause;
+			break;
+		}
+	}
+
+	
+	if (cause) {
+		memset(stringBuf, 0, buflen);
+		snprintf(stringBuf, buflen, "%s %s %02d %02d",
+			name, cause, status, reason);
+		WL_INFORM(("Connection status: %s\n", stringBuf));
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+#if WIRELESS_EXT > 14
+
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+	uint32 event = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+
+	if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+		return TRUE;
+	}
+	else
+		return FALSE;
+}
+#endif
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 
+#endif
+
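+/*
+ * Translate dongle event messages into Wireless Extensions events
+ * (IWEVREGISTERED, SIOCGIWAP, IWEVCUSTOM, ...) and, while the SoftAP is
+ * running, into private STA_JOIN/STA_LEAVE/AP_UP/AP_DOWN notifications.
+ */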
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd = 0;
+	uint32 event_type = ntoh32(e->event_type);
+	uint16 flags =  ntoh16(e->flags);
+	uint32 datalen = ntoh32(e->datalen);
+	uint32 status =  ntoh32(e->status);
+	uint32 toto;
+#if defined(ROAM_NOT_USED)
+	static uint32 roam_no_success = 0;
+	static bool roam_no_success_send = FALSE;
+#endif
+	memset(&wrqu, 0, sizeof(wrqu));
+	memset(extra, 0, sizeof(extra));
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return;
+	}
+
+	net_os_wake_lock(dev);
+
+	WL_TRACE(("%s: dev=%s event=%d \n", __FUNCTION__, dev->name, event_type));
+
+	switch (event_type) {
+
+	case WLC_E_RELOAD:
+		WL_ERROR(("%s: Firmware ERROR %d\n", __FUNCTION__, status));
+		net_os_send_hang_message(dev);
+		goto wl_iw_event_end;
+
+#if defined(SOFTAP)
+	case WLC_E_PRUNE:
+		if (ap_cfg_running) {
+			char *macaddr = (char *)&e->addr;
+			WL_SOFTAP(("PRUNE received, %02X:%02X:%02X:%02X:%02X:%02X!\n",
+				macaddr[0], macaddr[1], macaddr[2], macaddr[3], \
+				macaddr[4], macaddr[5]));
+
+			if (ap_macmode) {
+				int i;
+				for (i = 0; i < ap_black_list.count; i++) {
+					if (!bcmp(macaddr, &ap_black_list.ea[i], \
+						sizeof(struct ether_addr))) {
+						WL_SOFTAP(("mac in black list, ignore it\n"));
+						break;
+					}
+				}
+
+				if (i == ap_black_list.count) {
+					char mac_buf[32] = {0};
+					sprintf(mac_buf, "STA_BLOCK %02X:%02X:%02X:%02X:%02X:%02X",
+						macaddr[0], macaddr[1], macaddr[2],
+						macaddr[3], macaddr[4], macaddr[5]);
+					wl_iw_send_priv_event(priv_dev, mac_buf);
+				}
+			}
+		}
+		break;
+#endif
+	case WLC_E_TXFAIL:
+		cmd = IWEVTXDROP;
+		memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		break;
+#if WIRELESS_EXT > 14
+	case WLC_E_JOIN:
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+#if defined(SOFTAP)
+		WL_SOFTAP(("STA connect received %d\n", event_type));
+		if (ap_cfg_running) {
+			wl_iw_send_priv_event(priv_dev, "STA_JOIN");
+			goto wl_iw_event_end;
+		}
+#endif
+		memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		cmd = IWEVREGISTERED;
+		break;
+	case WLC_E_ROAM:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			memcpy(wrqu.addr.sa_data, &e->addr.octet, ETHER_ADDR_LEN);
+			wrqu.addr.sa_family = ARPHRD_ETHER;
+			cmd = SIOCGIWAP;
+		}
+#if defined(ROAM_NOT_USED)
+		else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			roam_no_success++;
+			if ((roam_no_success == 5) && (roam_no_success_send == FALSE)) {
+				roam_no_success_send = TRUE;
+				bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+				bzero(&extra, ETHER_ADDR_LEN);
+				cmd = SIOCGIWAP;
+				WL_ERROR(("%s  ROAMING did not succeeded , send Link Down\n", \
+					__FUNCTION__));
+			} else {
+				WL_TRACE(("##### ROAMING did not succeeded %d\n", roam_no_success));
+				goto wl_iw_event_end;
+			}
+		}
+#endif
+		break;
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+#if defined(SOFTAP)
+		WL_SOFTAP(("STA disconnect received %d\n", event_type));
+		if (ap_cfg_running) {
+			wl_iw_send_priv_event(priv_dev, "STA_LEAVE");
+			goto wl_iw_event_end;
+		}
+#endif
+		cmd = SIOCGIWAP;
+		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		bzero(&extra, ETHER_ADDR_LEN);
+		break;
+	case WLC_E_LINK:
+	case WLC_E_NDIS_LINK:
+		cmd = SIOCGIWAP;
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+#ifdef SOFTAP
+#ifdef AP_ONLY
+		if (ap_cfg_running) {
+#else
+		if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif	
+				WL_SOFTAP(("AP DOWN %d\n", event_type));
+				wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+			} else {
+				WL_TRACE(("STA_Link Down\n"));
+				g_ss_cache_ctrl.m_link_down = 1;
+			}
+#else
+			g_ss_cache_ctrl.m_link_down = 1;
+#endif
+			WL_TRACE(("Link Down\n"));
+
+			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+			bzero(&extra, ETHER_ADDR_LEN);
+		}
+		else {
+			memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+			g_ss_cache_ctrl.m_link_down = 0;
+
+			memcpy(g_ss_cache_ctrl.m_active_bssid, &e->addr, ETHER_ADDR_LEN);
+
+#ifdef SOFTAP
+#ifdef AP_ONLY
+			if (ap_cfg_running) {
+#else
+			if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif
+				WL_SOFTAP(("AP UP %d\n", event_type));
+				wl_iw_send_priv_event(priv_dev, "AP_UP");
+			} else {
+				WL_TRACE(("STA_LINK_UP\n"));
+#if defined(ROAM_NOT_USED)
+				roam_no_success_send = FALSE;
+				roam_no_success = 0;
+#endif
+			}
+#endif
+			WL_TRACE(("Link UP\n"));
+
+		}
+		net_os_wake_lock_timeout_enable(dev);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		break;
+	case WLC_E_ACTION_FRAME:
+		cmd = IWEVCUSTOM;
+		if (datalen + 1 <= sizeof(extra)) {
+			wrqu.data.length = datalen + 1;
+			extra[0] = WLC_E_ACTION_FRAME;
+			memcpy(&extra[1], data, datalen);
+			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+		}
+		break;
+
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		cmd = IWEVCUSTOM;
+		memcpy(&toto, data, 4);
+		if (sizeof(status) + 1 <= sizeof(extra)) {
+			wrqu.data.length = sizeof(status) + 1;
+			extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+			memcpy(&extra[1], &status, sizeof(status));
+			printf("wl_iw_event status %d PacketId %d \n", status, toto);
+			printf("WLC_E_ACTION_FRAME_COMPLETE len %d \n", wrqu.data.length);
+		}
+		break;
+#endif 
+#if WIRELESS_EXT > 17
+	case WLC_E_MIC_ERROR: {
+		struct	iw_michaelmicfailure  *micerrevt = (struct  iw_michaelmicfailure  *)&extra;
+		cmd = IWEVMICHAELMICFAILURE;
+		wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+		if (flags & WLC_EVENT_MSG_GROUP)
+			micerrevt->flags |= IW_MICFAILURE_GROUP;
+		else
+			micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+		memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+		break;
+	}
+#ifdef BCMWPA2
+	case WLC_E_PMKID_CACHE: {
+		if (data)
+		{
+			struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+			pmkid_cand_list_t *pmkcandlist;
+			pmkid_cand_t	*pmkidcand;
+			int count;
+
+			cmd = IWEVPMKIDCAND;
+			pmkcandlist = data;
+			count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+			ASSERT(count >= 0);
+			wrqu.data.length = sizeof(struct iw_pmkid_cand);
+			pmkidcand = pmkcandlist->pmkid_cand;
+			while (count) {
+				bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+				if (pmkidcand->preauth)
+					iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+				bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+					ETHER_ADDR_LEN);
+#ifndef SANDGATE2G
+				wireless_send_event(dev, cmd, &wrqu, extra);
+#endif
+				pmkidcand++;
+				count--;
+			}
+		}
+		goto wl_iw_event_end;
+	}
+#endif 
+#endif 
+
+	case WLC_E_SCAN_COMPLETE:
+#if defined(WL_IW_USE_ISCAN)
+		if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+			(g_iscan->iscan_state != ISCAN_STATE_IDLE))
+		{
+			up(&g_iscan->sysioc_sem);
+		} else {
+			cmd = SIOCGIWSCAN;
+			wrqu.data.length = strlen(extra);
+			WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific scan %d\n", \
+				g_iscan->iscan_state));
+		}
+#else
+		cmd = SIOCGIWSCAN;
+		wrqu.data.length = strlen(extra);
+		WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n"));
+#endif
+		break;
+
+	case WLC_E_PFN_NET_FOUND:
+	{
+		wlc_ssid_t	* ssid;
+		ssid = (wlc_ssid_t *)data;
+		WL_TRACE(("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n", \
+			__FUNCTION__, PNO_EVENT_UP, ssid->SSID, ssid->SSID_len));
+		net_os_wake_lock_timeout_enable(dev);
+		cmd = IWEVCUSTOM;
+		memset(&wrqu, 0, sizeof(wrqu));
+		strcpy(extra, PNO_EVENT_UP);
+		wrqu.data.length = strlen(extra);
+	}
+	break;
+
+	default:
+		
+		WL_TRACE(("Unknown Event %d: ignoring\n", event_type));
+		break;
+	}
+#ifndef SANDGATE2G
+	if (cmd) {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+		if (cmd == SIOCGIWSCAN)
+			wireless_send_event(dev, cmd, &wrqu, NULL);
+		else
+#endif
+		wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+#endif
+
+#if WIRELESS_EXT > 14
+	
+	memset(extra, 0, sizeof(extra));
+	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+		cmd = IWEVCUSTOM;
+		wrqu.data.length = strlen(extra);
+#ifndef SANDGATE2G
+		wireless_send_event(dev, cmd, &wrqu, extra);
+#endif
+	}
+#endif
+wl_iw_event_end:
+	net_os_wake_unlock(dev);
+#endif
+}
+
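+/* Fill struct iw_statistics from the firmware: RSSI-based link quality and
+ * noise level plus, for WIRELESS_EXT > 11, the "counters" iovar error
+ * statistics. */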
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+	int res = 0;
+	wl_cnt_t cnt;
+	int phy_noise;
+	int rssi;
+	scb_val_t scb_val;
+
+	phy_noise = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+		goto done;
+
+	phy_noise = dtoh32(phy_noise);
+	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise));
+
+	bzero(&scb_val, sizeof(scb_val_t));
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+		goto done;
+
+	rssi = dtoh32(scb_val.val);
+	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi));
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		wstats->qual.qual = 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		wstats->qual.qual = 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		wstats->qual.qual = 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		wstats->qual.qual = 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		wstats->qual.qual = 4;
+	else
+		wstats->qual.qual = 5;
+
+	
+	wstats->qual.level = 0x100 + rssi;
+	wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+	wstats->qual.updated |= 7;
+#endif 
+
+#if WIRELESS_EXT > 11
+	WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n", (int)sizeof(wl_cnt_t)));
+
+	memset(&cnt, 0, sizeof(wl_cnt_t));
+	res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+	if (res)
+	{
+		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n", res));
+		goto done;
+	}
+
+	cnt.version = dtoh16(cnt.version);
+	if (cnt.version != WL_CNT_T_VERSION) {
+		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+			WL_CNT_T_VERSION, cnt.version));
+		goto done;
+	}
+
+	wstats->discard.nwid = 0;
+	wstats->discard.code = dtoh32(cnt.rxundec);
+	wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+	wstats->discard.retries = dtoh32(cnt.txfail);
+	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+	wstats->miss.beacon = 0;
+
+	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+		dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif 
+
+done:
+	return res;
+}
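+
+/* Toggle BT-coexistence priority for Wi-Fi during DHCP by writing the
+ * btc_flags iovar (and the eSCO parameters when BT_DHCP_eSCO_FIX is set). */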
+static void
+wl_iw_bt_flag_set(
+	struct net_device *dev,
+	bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+	char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+	char buf_flag7_default[8]   = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_lock();
+#endif
+
+#if defined(BT_DHCP_eSCO_FIX)
+	set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+	WL_TRACE_COEX(("WI-FI priority boost via bt flags, set:%d\n", set));
+	if (set == TRUE) {
+		dev_wlc_bufvar_set(dev, "btc_flags",
+					(char *)&buf_flag7_dhcp_on[0], sizeof(buf_flag7_dhcp_on));
+	}
+	else  {
+		dev_wlc_bufvar_set(dev, "btc_flags",
+					(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+	}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_unlock();
+#endif
+}
+
+static void
+wl_iw_bt_timerfunc(ulong data)
+{
+	bt_info_t  *bt_local = (bt_info_t *)data;
+	bt_local->timer_on = 0;
+	WL_TRACE(("%s\n", __FUNCTION__));
+
+	up(&bt_local->bt_sem);
+}
+
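+/*
+ * BT/DHCP coexistence state machine thread: on BT_DHCP_START it opens an
+ * opportunity window for DHCP to complete, then forces the Wi-Fi priority
+ * flags for BT_DHCP_FLAG_FORCE_TIME msec, and finally restores the default
+ * flags and returns to idle.
+ */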
+static int
+_bt_dhcp_sysioc_thread(void *data)
+{
+	DAEMONIZE("dhcp_sysioc");
+
+	while (down_interruptible(&g_bt->bt_sem) == 0) {
+
+		net_os_wake_lock(g_bt->dev);
+
+		if (g_bt->timer_on) {
+			g_bt->timer_on = 0;
+			del_timer_sync(&g_bt->timer);
+		}
+
+		switch (g_bt->bt_state) {
+			case BT_DHCP_START:
+				WL_TRACE_COEX(("%s bt_dhcp stm: started \n", __FUNCTION__));
+				g_bt->bt_state = BT_DHCP_OPPORTUNITY_WINDOW;
+				mod_timer(&g_bt->timer, jiffies + BT_DHCP_OPPORTUNITY_WINDOW_TIME*HZ/1000);
+				g_bt->timer_on = 1;
+				break;
+
+			case BT_DHCP_OPPORTUNITY_WINDOW:
+				if (g_bt->dhcp_done) {
+					WL_TRACE_COEX(("%s DHCP Done before T1 expiration\n", \
+						__FUNCTION__));
+					g_bt->bt_state = BT_DHCP_IDLE;
+					g_bt->timer_on = 0;
+					break;
+				}
+
+				WL_TRACE_COEX(("%s DHCP T1:%d expired\n", \
+						__FUNCTION__, BT_DHCP_OPPORTUNITY_WINDOW_TIME));
+				if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, TRUE);
+				g_bt->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+				mod_timer(&g_bt->timer, jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000);
+				g_bt->timer_on = 1;
+				break;
+
+			case BT_DHCP_FLAG_FORCE_TIMEOUT:
+				if (g_bt->dhcp_done) {
+					WL_TRACE_COEX(("%s DHCP Done before T2 expiration\n", \
+						__FUNCTION__));
+				} else {
+					WL_TRACE_COEX(("%s DHCP wait interval T2:%d msec expired\n",
+						__FUNCTION__, BT_DHCP_FLAG_FORCE_TIME));
+				}
+
+				if (g_bt->dev)  wl_iw_bt_flag_set(g_bt->dev, FALSE);
+				g_bt->bt_state = BT_DHCP_IDLE;
+				g_bt->timer_on = 0;
+				break;
+
+			default:
+				WL_ERROR(("%s error g_status=%d !!!\n", __FUNCTION__, \
+				          g_bt->bt_state));
+				if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE);
+				g_bt->bt_state = BT_DHCP_IDLE;
+				g_bt->timer_on = 0;
+				break;
+		}
+
+		net_os_wake_unlock(g_bt->dev);
+	}
+
+	if (g_bt->timer_on) {
+		g_bt->timer_on = 0;
+		del_timer_sync(&g_bt->timer);
+	}
+
+	complete_and_exit(&g_bt->bt_exited, 0);
+}
+
+static void
+wl_iw_bt_release(void)
+{
+	bt_info_t *bt_local = g_bt;
+
+	if (!bt_local) {
+		return;
+	}
+
+	if (bt_local->bt_pid >= 0) {
+		KILL_PROC(bt_local->bt_pid, SIGTERM);
+		wait_for_completion(&bt_local->bt_exited);
+	}
+	kfree(bt_local);
+	g_bt = NULL;
+}
+
+static int
+wl_iw_bt_init(struct net_device *dev)
+{
+	bt_info_t *bt_dhcp = NULL;
+
+	bt_dhcp = kmalloc(sizeof(bt_info_t), GFP_KERNEL);
+	if (!bt_dhcp)
+		return -ENOMEM;
+
+	memset(bt_dhcp, 0, sizeof(bt_info_t));
+	bt_dhcp->bt_pid = -1;
+	g_bt = bt_dhcp;
+	bt_dhcp->dev = dev;
+	bt_dhcp->bt_state = BT_DHCP_IDLE;
+
+	
+	bt_dhcp->timer_ms    = 10;
+	init_timer(&bt_dhcp->timer);
+	bt_dhcp->timer.data = (ulong)bt_dhcp;
+	bt_dhcp->timer.function = wl_iw_bt_timerfunc;
+
+	sema_init(&bt_dhcp->bt_sem, 0);
+	init_completion(&bt_dhcp->bt_exited);
+	bt_dhcp->bt_pid = kernel_thread(_bt_dhcp_sysioc_thread, bt_dhcp, 0);
+	if (bt_dhcp->bt_pid < 0) {
+		WL_ERROR(("Failed in %s\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
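+/* Driver attach: allocate the iscan state and start its sysioc worker thread
+ * (WL_IW_USE_ISCAN builds), allocate the global scan result buffer and
+ * initialise the BT-coex DHCP helper. */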
+int wl_iw_attach(struct net_device *dev, void *dhdp)
+{
+	int params_size;
+	wl_iw_t *iw;
+#if defined(WL_IW_USE_ISCAN)
+	iscan_info_t *iscan = NULL;
+#endif
+
+	mutex_init(&wl_cache_lock);
+
+#if defined(WL_IW_USE_ISCAN)
+	if (!dev)
+		return 0;
+
+	memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t));
+
+#ifdef CSCAN
+	params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)) +
+	    (WL_NUMCHANNELS * sizeof(uint16)) + WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+#else
+	params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+#endif 
+	iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+	if (!iscan)
+		return -ENOMEM;
+	memset(iscan, 0, sizeof(iscan_info_t));
+	
+	iscan->iscan_ex_params_p = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+	if (!iscan->iscan_ex_params_p)
+		return -ENOMEM;
+	iscan->iscan_ex_param_size = params_size;
+	iscan->sysioc_pid = -1;
+	
+	g_iscan = iscan;
+	iscan->dev = dev;
+	iscan->iscan_state = ISCAN_STATE_IDLE;
+#if defined(CONFIG_FIRST_SCAN)
+	g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+	g_first_counter_scans = 0;
+	g_iscan->scan_flag = 0;
+#endif
+
+	iscan->timer_ms    = 8000;
+	init_timer(&iscan->timer);
+	iscan->timer.data = (ulong)iscan;
+	iscan->timer.function = wl_iw_timerfunc;
+
+	sema_init(&iscan->sysioc_sem, 0);
+	init_completion(&iscan->sysioc_exited);
+	iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+	if (iscan->sysioc_pid < 0)
+		return -ENOMEM;
+#endif
+
+	iw = *(wl_iw_t **)netdev_priv(dev);
+	iw->pub = (dhd_pub_t *)dhdp;
+#ifdef SOFTAP
+	priv_dev = dev;
+#endif 
+	g_scan = NULL;
+
+	g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
+	if (!g_scan)
+		return -ENOMEM;
+
+	memset(g_scan, 0, G_SCAN_RESULTS);
+	g_scan_specified_ssid = 0;
+
+#if !defined(CSCAN)
+	wl_iw_init_ss_cache_ctrl();
+#endif
+
+	wl_iw_bt_init(dev);
+
+	return 0;
+}
+
+void wl_iw_detach(void)
+{
+#if defined(WL_IW_USE_ISCAN)
+	iscan_buf_t  *buf;
+	iscan_info_t *iscan = g_iscan;
+
+	if (!iscan)
+		return;
+	if (iscan->sysioc_pid >= 0) {
+		KILL_PROC(iscan->sysioc_pid, SIGTERM);
+		wait_for_completion(&iscan->sysioc_exited);
+	}
+	mutex_lock(&wl_cache_lock);
+	while (iscan->list_hdr) {
+		buf = iscan->list_hdr->next;
+		kfree(iscan->list_hdr);
+		iscan->list_hdr = buf;
+	}
+	kfree(iscan->iscan_ex_params_p);
+	kfree(iscan);
+	g_iscan = NULL;
+	mutex_unlock(&wl_cache_lock);
+#endif
+
+	if (g_scan)
+		kfree(g_scan);
+
+	g_scan = NULL;
+#if !defined(CSCAN)
+	wl_iw_release_ss_cache_ctrl();
+#endif
+	wl_iw_bt_release();
+#ifdef SOFTAP
+	if (ap_cfg_running) {
+		WL_TRACE(("\n%s AP is going down\n", __FUNCTION__));
+		wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+	}
+#endif
+}
diff --git a/drivers/net/wireless/bcm4329/wl_iw.h b/drivers/net/wireless/bcm4329/wl_iw.h
new file mode 100644
index 0000000..d94bdb4
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/wl_iw.h
@@ -0,0 +1,306 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2010, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h,v 1.5.34.1.6.36.4.18 2011/02/10 19:33:12 Exp $
+ */
+
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+#define GET_SSID			"SSID="
+#define GET_CHANNEL			"CH="
+#define GET_NPROBE 			"NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL  	"ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL  	"PASSIVE="
+#define GET_HOME_DWELL  		"HOME="
+#define GET_SCAN_TYPE			"TYPE="
+
+#define BAND_GET_CMD				"GETBAND"
+#define BAND_SET_CMD				"SETBAND"
+#define DTIM_SKIP_GET_CMD			"DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD			"DTIMSKIPSET"
+#define SETSUSPEND_CMD				"SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD			"PNOSSIDCLR"
+#define PNOSETUP_SET_CMD			"PNOSETUP "
+#define PNOENABLE_SET_CMD			"PNOFORCE"
+#define PNODEBUG_SET_CMD			"PNODEBUG"
+#define TXPOWER_SET_CMD				"TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+
+typedef struct wl_iw_extra_params {
+	int 	target_channel;
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];
+	char custom_locale[WLC_CNTRY_BUF_SZ];
+	int32 custom_locale_rev;
+};
+
+#define	WL_IW_RSSI_MINVAL	-200
+#define	WL_IW_RSSI_NO_SIGNAL	-91
+#define	WL_IW_RSSI_VERY_LOW	-80
+#define	WL_IW_RSSI_LOW		-70
+#define	WL_IW_RSSI_GOOD		-68
+#define	WL_IW_RSSI_VERY_GOOD	-58
+#define	WL_IW_RSSI_EXCELLENT	-57
+#define	WL_IW_RSSI_INVALID	 0
+#define MAX_WX_STRING		80
+#define isprint(c)		bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN	(SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI		(SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN	(SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED	(SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR	(SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP		(SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START		(SIOCIWFIRSTPRIV+13)
+
+
+#define WL_SET_AP_CFG           (SIOCIWFIRSTPRIV+15)
+#define WL_AP_STA_LIST          (SIOCIWFIRSTPRIV+17)
+#define WL_AP_MAC_FLTR	        (SIOCIWFIRSTPRIV+19)
+#define WL_AP_BSS_START         (SIOCIWFIRSTPRIV+21)
+#define AP_LPB_CMD              (SIOCIWFIRSTPRIV+23)
+#define WL_AP_STOP              (SIOCIWFIRSTPRIV+25)
+#define WL_FW_RELOAD            (SIOCIWFIRSTPRIV+27)
+#define WL_AP_STA_DISASSOC		(SIOCIWFIRSTPRIV+29)
+#define WL_COMBO_SCAN           (SIOCIWFIRSTPRIV+31)
+
+#define G_SCAN_RESULTS		(8*1024)
+#define WE_ADD_EVENT_FIX	0x80
+#define G_WLAN_SET_ON		0
+#define G_WLAN_SET_OFF		1
+
+#define CHECK_EXTRA_FOR_NULL(extra) \
+if (!extra) { \
+	WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \
+	return -EINVAL; \
+}
+
+typedef struct wl_iw {
+	char nickname[IW_ESSID_MAX_SIZE];
+
+	struct iw_statistics wstats;
+
+	int spy_num;
+	uint32 pwsec;
+	uint32 gwsec;
+	bool privacy_invoked;
+
+	struct ether_addr spy_addr[IW_MAX_SPY];
+	struct iw_quality spy_qual[IW_MAX_SPY];
+	void  *wlinfo;
+	dhd_pub_t * pub;
+} wl_iw_t;
+
+#define WLC_IW_SS_CACHE_MAXLEN				2048
+#define WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN	32
+#define WLC_IW_BSS_INFO_MAXLEN 				\
+	(WLC_IW_SS_CACHE_MAXLEN - WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN)
+
+typedef struct wl_iw_ss_cache {
+	struct wl_iw_ss_cache *next;
+	int dirty;
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_iw_ss_cache_t;
+
+typedef struct wl_iw_ss_cache_ctrl {
+	wl_iw_ss_cache_t *m_cache_head;
+	int m_link_down;
+	int m_timer_expired;
+	char m_active_bssid[ETHER_ADDR_LEN];
+	uint m_prev_scan_mode;
+	uint m_cons_br_scan_cnt;
+	struct timer_list *m_timer;
+} wl_iw_ss_cache_ctrl_t;
+
+typedef enum broadcast_first_scan {
+	BROADCAST_SCAN_FIRST_IDLE = 0,
+	BROADCAST_SCAN_FIRST_STARTED,
+	BROADCAST_SCAN_FIRST_RESULT_READY,
+	BROADCAST_SCAN_FIRST_RESULT_CONSUMED
+} broadcast_first_scan_t;
+#ifdef SOFTAP
+#define SSID_LEN	33
+#define SEC_LEN		16
+#define KEY_LEN		65
+#define PROFILE_OFFSET	32
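+/* SoftAP configuration as parsed from the AP_CFG private command. */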
+struct ap_profile {
+	uint8	ssid[SSID_LEN];
+	uint8	sec[SEC_LEN];
+	uint8	key[KEY_LEN];
+	uint32	channel;
+	uint32	preamble;
+	uint32	max_scb;
+	uint32  closednet;
+	char country_code[WLC_CNTRY_BUF_SZ];
+};
+
+
+#define MACLIST_MODE_DISABLED		0
+#define MACLIST_MODE_DENY		1
+#define MACLIST_MODE_ALLOW		2
+struct mflist {
+	uint count;
+	struct ether_addr ea[16];
+};
+
+struct mac_list_set {
+	uint32	mode;
+	struct mflist mac_list;
+};
+#endif
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+void wl_iw_detach(void);
+int wl_control_wl_start(struct net_device *dev);
+
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_packet_filter(struct net_device *dev, int val);
+extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+extern char *dhd_bus_country_get(struct net_device *dev);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
+
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, \
+					    ushort  scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_dev_pno_reset(struct net_device *dev);
+extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, \
+				 int nssid, ushort  scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_dev_pno_enable(struct net_device *dev,  int pfn_enabled);
+extern int dhd_dev_get_pno_status(struct net_device *dev);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec);
+
+#define PNO_TLV_PREFIX			'S'
+#define PNO_TLV_VERSION			'1'
+#define PNO_TLV_SUBVERSION 		'2'
+#define PNO_TLV_RESERVED		'0'
+#define PNO_TLV_TYPE_SSID_IE		'S'
+#define PNO_TLV_TYPE_TIME		'T'
+#define PNO_TLV_FREQ_REPEAT		'R'
+#define PNO_TLV_FREQ_EXPO_MAX	'M'
+#define PNO_EVENT_UP			"PNO_EVENT"
+
+typedef struct cmd_tlv {
+	char prefix;
+	char version;
+	char subver;
+	char reserved;
+} cmd_tlv_t;
+
+#ifdef SOFTAP_TLV_CFG
+#define SOFTAP_SET_CMD				"SOFTAPSET "
+#define SOFTAP_TLV_PREFIX			'A'
+#define SOFTAP_TLV_VERSION			'1'
+#define SOFTAP_TLV_SUBVERSION			'0'
+#define SOFTAP_TLV_RESERVED			'0'
+
+#define TLV_TYPE_SSID				'S'
+#define TLV_TYPE_SECUR				'E'
+#define TLV_TYPE_KEY				'K'
+#define TLV_TYPE_CHANNEL			'C'
+#endif
+
+#if defined(CSCAN)
+
+typedef struct cscan_tlv {
+	char prefix;
+	char version;
+	char subver;
+	char reserved;
+} cscan_tlv_t;
+
+#define CSCAN_COMMAND				"CSCAN "
+#define CSCAN_TLV_PREFIX 			'S'
+#define CSCAN_TLV_VERSION			1
+#define CSCAN_TLV_SUBVERSION			0
+#define CSCAN_TLV_TYPE_SSID_IE			'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE		'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE		'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE		'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE		'P'
+#define CSCAN_TLV_TYPE_HOME_IE			'H'
+#define CSCAN_TLV_TYPE_STYPE_IE			'T'
+
+extern int wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, \
+					int channel_num, int *bytes_left);
+
+extern int wl_iw_parse_data_tlv(char** list_str, void  *dst, int dst_size, \
+					const char token, int input_size, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, \
+					int max, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max);
+
+extern int wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num);
+
+#endif
+
+#endif
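The CSCAN_* and cscan_tlv_t declarations above define a small TLV command format: the ASCII "CSCAN " prefix, a four-byte header, then type-tagged entries that wl_iw_parse_ssid_list_tlv() and the other parsers consume. A minimal sketch of assembling that header follows, assuming wl_iw.h and the usual kernel string helpers are included; the buffer handling and return convention are illustrative, not taken from the driver.

	/* Sketch: write the "CSCAN " prefix and the cscan_tlv_t header into buf.
	 * Returns the number of bytes used, or -1 if buf is too small.
	 */
	static int cscan_build_header(char *buf, int buflen)
	{
		cscan_tlv_t *hdr;
		int used = strlen(CSCAN_COMMAND);	/* "CSCAN " */

		if (buflen < used + (int)sizeof(cscan_tlv_t))
			return -1;

		memcpy(buf, CSCAN_COMMAND, used);
		hdr = (cscan_tlv_t *)(buf + used);
		hdr->prefix   = CSCAN_TLV_PREFIX;	/* 'S' */
		hdr->version  = CSCAN_TLV_VERSION;	/* 1 */
		hdr->subver   = CSCAN_TLV_SUBVERSION;	/* 0 */
		hdr->reserved = 0;

		/* CSCAN_TLV_TYPE_SSID_IE and other typed entries would follow. */
		return used + (int)sizeof(cscan_tlv_t);
	}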
diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig
new file mode 100644
index 0000000..db434ab
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Kconfig
@@ -0,0 +1,33 @@
+config BCMDHD
+	tristate "Broadcom 4329/30 wireless cards support"
+	depends on MMC
+	---help---
+	  This module adds support for wireless adapters based on the
+	  Broadcom 4329/30 chipset.
+
+	  This driver uses the kernel's wireless extensions subsystem.
+
+	  If you choose to build a module, it'll be called bcmdhd. Say M if
+	  unsure.
+
+config BCMDHD_FW_PATH
+	depends on BCMDHD
+	string "Firmware path"
+	default "/system/etc/firmware/fw_bcmdhd.bin"
+	---help---
+	  Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+	depends on BCMDHD
+	string "NVRAM path"
+	default "/system/etc/wifi/bcmdhd.cal"
+	---help---
+	  Path to the calibration file.
+
+config BCMDHD_WEXT
+	bool "Enable WEXT support"
+	depends on BCMDHD && CFG80211 = n
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	help
+	  Enables Wireless Extensions (WEXT) support.
diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile
new file mode 100644
index 0000000..936d7ac
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Makefile
@@ -0,0 +1,29 @@
+# bcmdhd
+DHDCFLAGS = -Wall -Wstrict-prototypes -Werror -Dlinux -DBCMDRIVER             \
+	-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DWLBTAMP -DBCMFILEIMAGE  \
+	-DDHDTHREAD -DDHD_GPL -DDHD_SCHED -DDHD_DEBUG -DSDTEST -DBDC -DTOE    \
+	-DDHD_BCMEVENTS -DSHOW_EVENTS -DDONGLEOVERLAYS -DBCMDBG               \
+	-DCUSTOMER_HW2 -DCUSTOM_OOB_GPIO_NUM=2 -DOOB_INTR_ONLY -DHW_OOB       \
+	-DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P     \
+	-DNEW_COMPAT_WIRELESS -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT          \
+	-DKEEP_ALIVE -DCSCAN -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT     \
+	-DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD                        \
+	-Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include
+
+DHDOFILES = aiutils.o bcmsdh_sdmmc_linux.o dhd_linux.o siutils.o bcmutils.o   \
+	dhd_linux_sched.o bcmwifi.o dhd_sdio.o bcmevent.o dhd_bta.o hndpmu.o  \
+	bcmsdh.o dhd_cdc.o bcmsdh_linux.o dhd_common.o linux_osl.o            \
+	bcmsdh_sdmmc.o dhd_custom_gpio.o sbutils.o wldev_common.o wl_android.o
+
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
+ifneq ($(CONFIG_WIRELESS_EXT),)
+bcmdhd-objs += wl_iw.o
+DHDCFLAGS += -DSOFTAP
+endif
+ifneq ($(CONFIG_CFG80211),)
+bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o
+DHDCFLAGS += -DWL_CFG80211
+endif
+EXTRA_CFLAGS = $(DHDCFLAGS)
+EXTRA_LDFLAGS += --strip-debug
diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c
new file mode 100644
index 0000000..059df89
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/aiutils.c
@@ -0,0 +1,675 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c,v 1.26.2.1 2010-03-09 18:41:21 Exp $
+ */
+
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+
+
+
+
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+	uint32 ent;
+	uint inv = 0, nom = 0;
+
+	while (TRUE) {
+		ent = R_REG(si_osh(sih), *eromptr);
+		(*eromptr)++;
+
+		if (mask == 0)
+			break;
+
+		if ((ent & ER_VALID) == 0) {
+			inv++;
+			continue;
+		}
+
+		if (ent == (ER_END | ER_VALID))
+			break;
+
+		if ((ent & mask) == match)
+			break;
+
+		nom++;
+	}
+
+	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+	if (inv + nom) {
+		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
+	}
+	return ent;
+}
+
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+        uint32 *sizel, uint32 *sizeh)
+{
+	uint32 asd, sz, szd;
+
+	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+	if (((asd & ER_TAG1) != ER_ADD) ||
+	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+	    ((asd & AD_ST_MASK) != st)) {
+		
+		(*eromptr)--;
+		return 0;
+	}
+	*addrl = asd & AD_ADDR_MASK;
+	if (asd & AD_AG32)
+		*addrh = get_erom_ent(sih, eromptr, 0, 0);
+	else
+		*addrh = 0;
+	*sizeh = 0;
+	sz = asd & AD_SZ_MASK;
+	if (sz == AD_SZ_SZD) {
+		szd = get_erom_ent(sih, eromptr, 0, 0);
+		*sizel = szd & SD_SZ_MASK;
+		if (szd & SD_SG32)
+			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
+	} else
+		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+	return asd;
+}
+
+static void
+ai_hwfixup(si_info_t *sii)
+{
+}
+
+
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc = (chipcregs_t *)regs;
+	uint32 erombase, *eromptr, *eromlim;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+		break;
+
+	case PCI_BUS:
+		
+		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+		
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		eromptr = regs;
+		break;
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		eromptr = (uint32 *)(uintptr)erombase;
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+		ASSERT(0);
+		return;
+	}
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+	         regs, erombase, eromptr, eromlim));
+	while (eromptr < eromlim) {
+		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+		uint32 *base;
+		uint i, j, idx;
+		bool br;
+
+		br = FALSE;
+
+		
+		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+		if (cia == (ER_END | ER_VALID)) {
+			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+			ai_hwfixup(sii);
+			return;
+		}
+		base = eromptr - 1;
+		cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+		if ((cib & ER_TAG) != ER_CI) {
+			SI_ERROR(("CIA not followed by CIB\n"));
+			goto error;
+		}
+
+		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+		         "nsw = %d, nmp = %d & nsp = %d\n",
+		         mfg, cid, crev, base, nmw, nsw, nmp, nsp));
+
+		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+			continue;
+		if ((nmw + nsw == 0)) {
+			
+			if (cid == OOB_ROUTER_CORE_ID) {
+				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+					&addrl, &addrh, &sizel, &sizeh);
+				if (asd != 0) {
+					sii->oob_router = addrl;
+				}
+			}
+			continue;
+		}
+
+		idx = sii->numcores;
+
+		sii->cia[idx] = cia;
+		sii->cib[idx] = cib;
+		sii->coreid[idx] = cid;
+
+		for (i = 0; i < nmp; i++) {
+			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+			if ((mpd & ER_TAG) != ER_MP) {
+				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+				goto error;
+			}
+			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
+			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+		}
+
+		
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+		if (asd == 0) {
+			
+			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd != 0)
+				br = TRUE;
+			else
+				if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+					SI_ERROR(("First Slave ASD for core 0x%04x malformed "
+					          "(0x%08x)\n", cid, asd));
+					goto error;
+				}
+		}
+		sii->coresba[idx] = addrl;
+		sii->coresba_size[idx] = sizel;
+		
+		j = 1;
+		do {
+			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+				sii->coresba2[idx] = addrl;
+				sii->coresba2_size[idx] = sizel;
+			}
+			j++;
+		} while (asd != 0);
+
+		
+		for (i = 1; i < nsp; i++) {
+			j = 0;
+			do {
+				asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, &addrl, &addrh,
+				              &sizel, &sizeh);
+			} while (asd != 0);
+			if (j == 0) {
+				SI_ERROR((" SP %d has no address descriptors\n", i));
+				goto error;
+			}
+		}
+
+		
+		for (i = 0; i < nmw; i++) {
+			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for MW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if (i == 0)
+				sii->wrapba[idx] = addrl;
+		}
+
+		
+		for (i = 0; i < nsw; i++) {
+			uint fwp = (nsp == 1) ? 0 : 1;
+			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for SW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if ((nmw == 0) && (i == 0))
+				sii->wrapba[idx] = addrl;
+		}
+
+		
+		if (br)
+			continue;
+
+		
+		sii->numcores++;
+	}
+
+	SI_ERROR(("Reached end of erom without finding END"));
+
+error:
+	sii->numcores = 0;
+	return;
+}
+
+
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 addr = sii->coresba[coreidx];
+	uint32 wrap = sii->wrapba[coreidx];
+	void *regs;
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		sii->curmap = regs = sii->regs[coreidx];
+		if (!sii->wrappers[coreidx]) {
+			sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->wrappers[coreidx]));
+		}
+		sii->curwrap = sii->wrappers[coreidx];
+		break;
+
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		sii->curmap = regs = (void *)((uintptr)addr);
+		sii->curwrap = (void *)((uintptr)wrap);
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	sii->curmap = regs;
+	sii->curidx = coreidx;
+
+	return regs;
+}
+
+
+int
+ai_numaddrspaces(si_t *sih)
+{
+	return 2;
+}
+
+
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+	uint cidx;
+
+	sii = SI_INFO(sih);
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return sii->coresba[cidx];
+	else if (asidx == 1)
+		return sii->coresba2[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+	uint cidx;
+
+	sii = SI_INFO(sih);
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return sii->coresba_size[cidx];
+	else if (asidx == 1)
+		return sii->coresba2_size[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+uint
+ai_flag(si_t *sih)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+	ai = sii->curwrap;
+
+	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 *map = (uint32 *) sii->curwrap;
+
+	if (mask || val) {
+		uint32 w = R_REG(sii->osh, map+(offset/4));
+		w &= ~mask;
+		w |= val;
+		/* write back the merged value, not just the new bits */
+		W_REG(sii->osh, map+(offset/4), w);
+	}
+
+	return (R_REG(sii->osh, map+(offset/4)));
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	uint32 cia;
+
+	sii = SI_INFO(sih);
+	cia = sii->cia[sii->curidx];
+	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	uint32 cib;
+
+	sii = SI_INFO(sih);
+	cib = sii->cib[sii->curidx];
+	return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+	ai = sii->curwrap;
+
+	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		
+		fast = TRUE;
+		
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		
+
+		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		
+		origidx = si_coreidx(&sii->pub);
+
+		
+		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	
+	if (mask || val) {
+		w = (R_REG(sii->osh, r) & ~mask) | val;
+		W_REG(sii->osh, r, w);
+	}
+
+	
+	w = R_REG(sii->osh, r);
+
+	if (!fast) {
+		
+		if (origidx != coreidx)
+			ai_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+		return;
+
+	W_REG(sii->osh, &ai->ioctrl, bits);
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	OSL_DELAY(10);
+
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	OSL_DELAY(1);
+}
+
+
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	
+	ai_core_disable(sih, (bits | resetbits));
+
+	
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	W_REG(sii->osh, &ai->resetctrl, 0);
+	OSL_DELAY(1);
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	OSL_DELAY(1);
+}
+
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+
+	return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+		W_REG(sii->osh, &ai->iostatus, w);
+	}
+
+	return R_REG(sii->osh, &ai->iostatus);
+}
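Note that ai_corereg() above serves as both the read and the write path: with mask and val both zero it simply returns the current register contents, while a non-zero mask/val pair performs a read-modify-write before returning the new value. A brief usage sketch, where the core index, register offset, and bit value are placeholders rather than real driver constants:

	/* Sketch: read a core register, then set one bit in it via ai_corereg().
	 * CORE_IDX, REG_OFF and SOME_BIT are hypothetical values.
	 */
	#define CORE_IDX	0
	#define REG_OFF		0x408	/* must be < SI_CORE_SIZE */
	#define SOME_BIT	0x1

	static uint example_set_bit(si_t *sih)
	{
		uint cur;

		/* mask == 0 and val == 0: pure read, nothing is written */
		cur = ai_corereg(sih, CORE_IDX, REG_OFF, 0, 0);

		/* non-zero mask/val: clear the masked bits, OR in val, write back */
		cur = ai_corereg(sih, CORE_IDX, REG_OFF, SOME_BIT, SOME_BIT);

		return cur;
	}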
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 0000000..3cee7c2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,122 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmevent.c,v 1.8.2.7 2011-02-01 06:23:39 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+#if WLC_E_LAST != 84
+#error "You need to add an entry to bcmevent_names[] for the new event"
+#endif
+
+const bcmevent_name_t bcmevent_names[] = {
+	{ WLC_E_SET_SSID, "SET_SSID" },
+	{ WLC_E_JOIN, "JOIN" },
+	{ WLC_E_START, "START" },
+	{ WLC_E_AUTH, "AUTH" },
+	{ WLC_E_AUTH_IND, "AUTH_IND" },
+	{ WLC_E_DEAUTH, "DEAUTH" },
+	{ WLC_E_DEAUTH_IND, "DEAUTH_IND" },
+	{ WLC_E_ASSOC, "ASSOC" },
+	{ WLC_E_ASSOC_IND, "ASSOC_IND" },
+	{ WLC_E_REASSOC, "REASSOC" },
+	{ WLC_E_REASSOC_IND, "REASSOC_IND" },
+	{ WLC_E_DISASSOC, "DISASSOC" },
+	{ WLC_E_DISASSOC_IND, "DISASSOC_IND" },
+	{ WLC_E_QUIET_START, "START_QUIET" },
+	{ WLC_E_QUIET_END, "END_QUIET" },
+	{ WLC_E_BEACON_RX, "BEACON_RX" },
+	{ WLC_E_LINK, "LINK" },
+	{ WLC_E_MIC_ERROR, "MIC_ERROR" },
+	{ WLC_E_NDIS_LINK, "NDIS_LINK" },
+	{ WLC_E_ROAM, "ROAM" },
+	{ WLC_E_TXFAIL, "TXFAIL" },
+	{ WLC_E_PMKID_CACHE, "PMKID_CACHE" },
+	{ WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF" },
+	{ WLC_E_PRUNE, "PRUNE" },
+	{ WLC_E_AUTOAUTH, "AUTOAUTH" },
+	{ WLC_E_EAPOL_MSG, "EAPOL_MSG" },
+	{ WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE" },
+	{ WLC_E_ADDTS_IND, "ADDTS_IND" },
+	{ WLC_E_DELTS_IND, "DELTS_IND" },
+	{ WLC_E_BCNSENT_IND, "BCNSENT_IND" },
+	{ WLC_E_BCNRX_MSG, "BCNRX_MSG" },
+	{ WLC_E_BCNLOST_MSG, "BCNLOST_IND" },
+	{ WLC_E_ROAM_PREP, "ROAM_PREP" },
+	{ WLC_E_PFN_NET_FOUND, "PFNFOUND_IND" },
+	{ WLC_E_PFN_NET_LOST, "PFNLOST_IND" },
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+	{ WLC_E_IBSS_ASSOC, "IBSS_ASSOC" },
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+	{ WLC_E_RADIO, "RADIO" },
+	{ WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG" },
+	{ WLC_E_PROBREQ_MSG, "PROBE_REQ_MSG" },
+	{ WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND" },
+	{ WLC_E_PSK_SUP, "PSK_SUP" },
+	{ WLC_E_COUNTRY_CODE_CHANGED, "CNTRYCODE_IND" },
+	{ WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME" },
+	{ WLC_E_ICV_ERROR, "ICV_ERROR" },
+	{ WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR" },
+	{ WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR" },
+	{ WLC_E_TRACE, "TRACE" },
+	{ WLC_E_BTA_HCI_EVENT, "BTA_HCI_EVENT" },
+	{ WLC_E_IF, "IF" },
+#ifdef WLP2P
+	{ WLC_E_P2P_DISC_LISTEN_COMPLETE, "WLC_E_P2P_DISC_LISTEN_COMPLETE" },
+#endif
+	{ WLC_E_RSSI, "RSSI" },
+	{ WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE" },
+	{ WLC_E_EXTLOG_MSG, "EXTERNAL LOG MESSAGE" },
+#ifdef WIFI_ACT_FRAME
+	{ WLC_E_ACTION_FRAME, "ACTION_FRAME" },
+	{ WLC_E_ACTION_FRAME_RX, "ACTION_FRAME_RX" },
+	{ WLC_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE" },
+#endif
+	{ WLC_E_ESCAN_RESULT, "WLC_E_ESCAN_RESULT" },
+	{ WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "WLC_E_AF_OFF_CHAN_COMPLETE" },
+#ifdef WLP2P
+	{ WLC_E_PROBRESP_MSG, "PROBE_RESP_MSG" },
+	{ WLC_E_P2P_PROBREQ_MSG, "P2P PROBE_REQ_MSG" },
+#endif
+#ifdef PROP_TXSTATUS
+	{ WLC_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP" },
+#endif
+	{ WLC_E_WAKE_EVENT, "WAKE_EVENT" },
+	{ WLC_E_DCS_REQUEST, "DCS_REQUEST" },
+	{ WLC_E_RM_COMPLETE, "RM_COMPLETE" },
+#ifdef WLMEDIA_HTSF
+	{ WLC_E_HTSFSYNC, "HTSF_SYNC_EVENT" },
+#endif
+	{ WLC_E_OVERLAY_REQ, "OVERLAY_REQ_EVENT" },
+	{ WLC_E_CSA_COMPLETE_IND, "WLC_E_CSA_COMPLETE_IND" },
+	{ WLC_E_EXCESS_PM_WAKE_EVENT, "EXCESS_PM_WAKE_EVENT" },
+	{ WLC_E_PFN_SCAN_NONE, "PFN_SCAN_NONE" },
+	{ WLC_E_PFN_SCAN_ALLGONE, "PFN_SCAN_ALLGONE" }
+};
+
+
+const int bcmevent_names_size = ARRAYSIZE(bcmevent_names);
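Because bcmevent_names[] is a flat table rather than an array indexed by event code, callers that want a printable name have to search it. A small lookup sketch, assuming bcmevent_name_t exposes event and name members matching the initializers above:

	/* Sketch: map an event code to its name, or a fallback string. */
	static const char *example_event_name(uint event_type)
	{
		int i;

		for (i = 0; i < bcmevent_names_size; i++) {
			if (bcmevent_names[i].event == event_type)
				return bcmevent_names[i].name;
		}
		return "UNKNOWN_EVENT";
	}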
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 0000000..fc68b64
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,662 @@
+/*
+ *  BCMSDH interface glue
+ *  implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c,v 1.57.6.4 2010-12-23 01:13:15 Exp $
+ */
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h>	/* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h>	/* common SDIO/controller interface */
+#include <sbsdio.h>	/* BRCM sdio device core */
+
+#include <sdio.h>	/* sdio spec */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+
+struct bcmsdh_info
+{
+	bool	init_success;	/* underlying driver successfully attached */
+	void	*sdioh;		/* handler for sdioh */
+	uint32  vendevid;	/* Target Vendor and Device ID on SD bus */
+	osl_t   *osh;
+	bool	regfail;	/* Save status of last reg_read/reg_write call */
+	uint32	sbwad;		/* Save backplane window address */
+};
+/* local copy of bcm sd handler */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+
+	/* save the handler locally */
+	l_bcmsdh = bcmsdh;
+
+	if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) {
+		bcmsdh_detach(osh, bcmsdh);
+		return NULL;
+	}
+
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+
+	*regsva = (uint32 *)SI_ENUM_BASE;
+
+	/* Report the BAR, to fix if needed */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+	return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+		if (bcmsdh->sdioh) {
+			sdioh_detach(osh, bcmsdh->sdioh);
+			bcmsdh->sdioh = NULL;
+		}
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+	return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+                void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	if (SDIOH_API_SUCCESS(status))
+		return FALSE;
+	else
+		return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	/* not supported yet */
+	return BCME_UNSUPPORTED;
+}
+
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+	             addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer. */
+		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		bcopy(cis, tmp_buf, length);
+		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+			ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff);
+			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += sprintf((char *)ptr, "\n");
+		}
+		MFREE(bcmsdh->osh, tmp_buf, length);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
+static int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address)
+{
+	int err = 0;
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+
+	return err;
+}
+
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bar0 != bcmsdh->sbwad) {
+		if (bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))
+			return 0xFFFFFFFF;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+		SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+			case sizeof(uint8):
+				return (word & 0xff);
+			case sizeof(uint16):
+				return (word & 0xffff);
+			case sizeof(uint32):
+				return word;
+			default:
+				bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+	return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+	             __FUNCTION__, addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bar0 != bcmsdh->sbwad) {
+		if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+			return err;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+	                            addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+	              __FUNCTION__, data, addr, size));
+	return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+	return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if (bar0 != bcmsdh->sbwad) {
+		if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+			return err;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	            __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if (bar0 != bcmsdh->sbwad) {
+		if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)))
+			return err;
+
+		bcmsdh->sbwad = bar0;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+	                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+	                              addr, 4, nbytes, buf, NULL);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return sdioh_waitlockfree(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+	return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+	return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+	return;
+}
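The register accessors above hide the SDIO backplane window: bcmsdh_reg_read()/bcmsdh_reg_write() reprogram the window via bcmsdhsdio_set_sbaddr_window() only when the upper address bits change, send just the low offset in the actual request, and record success or failure in regfail. A short caller-side sketch, with the backplane address as a placeholder:

	/* Sketch: read one 32-bit backplane register and check for failure.
	 * REG_ADDR is a hypothetical backplane address.
	 */
	#define REG_ADDR	0x18000000

	static int example_read_reg(void *sdh, uint32 *out)
	{
		uint32 val = bcmsdh_reg_read(sdh, REG_ADDR, sizeof(uint32));

		if (bcmsdh_regfail(sdh))
			return BCME_SDIO_ERROR;	/* window setup or access failed */

		*out = val;
		return 0;
	}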
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
new file mode 100644
index 0000000..6fa4737
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -0,0 +1,740 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c,v 1.72.6.5 2010-12-23 01:13:15 Exp $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+
+#if defined(OOB_INTR_ONLY)
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* defined(OOB_INTR_ONLY) */
+#if defined(CONFIG_MACH_SANDGATE2G) || defined(CONFIG_MACH_LOGICPD_PXA270)
+#if !defined(BCMPLATFORM_BUS)
+#define BCMPLATFORM_BUS
+#endif /* !defined(BCMPLATFORM_BUS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#include <linux/platform_device.h>
+#endif /* KERNEL_VERSION(2, 6, 19) */
+#endif /* CONFIG_MACH_SANDGATE2G || CONFIG_MACH_LOGICPD_PXA270 */
+
+/**
+ * SDIO Host Controller info
+ */
+typedef struct bcmsdh_hc bcmsdh_hc_t;
+
+struct bcmsdh_hc {
+	bcmsdh_hc_t *next;
+#ifdef BCMPLATFORM_BUS
+	struct device *dev;			/* platform device handle */
+#else
+	struct pci_dev *dev;		/* pci device handle */
+#endif /* BCMPLATFORM_BUS */
+	osl_t *osh;
+	void *regs;			/* SDIO Host Controller address */
+	bcmsdh_info_t *sdh;		/* SDIO Host Controller handle */
+	void *ch;
+	unsigned int oob_irq;
+	unsigned long oob_flags; /* OOB host IRQ specification (edge type, etc.) */
+	bool oob_irq_registered;
+	bool oob_irq_enable_flag;
+#if defined(OOB_INTR_ONLY)
+	spinlock_t irq_lock;
+#endif
+};
+static bcmsdh_hc_t *sdhcinfo = NULL;
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL};
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+/* forward declarations */
+int bcmsdh_probe(struct device *dev);
+int bcmsdh_remove(struct device *dev);
+
+EXPORT_SYMBOL(bcmsdh_probe);
+EXPORT_SYMBOL(bcmsdh_remove);
+
+#else
+/* forward declarations */
+static int __devinit bcmsdh_probe(struct device *dev);
+static int __devexit bcmsdh_remove(struct device *dev);
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static struct device_driver bcmsdh_driver = {
+	.name		= "pxa2xx-mci",
+	.bus		= &platform_bus_type,
+	.probe		= bcmsdh_probe,
+	.remove		= bcmsdh_remove,
+	.suspend	= NULL,
+	.resume		= NULL,
+	};
+#endif /* BCMLXSDMMC */
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_probe(struct device *dev)
+{
+	osl_t *osh = NULL;
+	bcmsdh_hc_t *sdhc = NULL;
+	ulong regs = 0;
+	bcmsdh_info_t *sdh = NULL;
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+	struct platform_device *pdev;
+	struct resource *r;
+#endif /* BCMLXSDMMC */
+	int irq = 0;
+	uint32 vendevid;
+	unsigned long irq_flags = 0;
+
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+	pdev = to_platform_device(dev);
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!r || irq == NO_IRQ)
+		return -ENXIO;
+#endif /* BCMLXSDMMC */
+
+#if defined(OOB_INTR_ONLY)
+#ifdef HW_OOB
+	irq_flags =
+		IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+	 irq_flags = IRQF_TRIGGER_FALLING;
+#endif /* HW_OOB */
+
+	/* Get customer-specific OOB IRQ parameters: IRQ number and IRQ type */
+	irq = dhd_customer_oob_irq_map(&irq_flags);
+	if  (irq < 0) {
+		SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__));
+		return 1;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+	/* allocate SDIO Host Controller state info */
+	if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) {
+		SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+		SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+			__FUNCTION__,
+			MALLOCED(osh)));
+		goto err;
+	}
+	bzero(sdhc, sizeof(bcmsdh_hc_t));
+	sdhc->osh = osh;
+
+	sdhc->dev = (void *)dev;
+
+#ifdef BCMLXSDMMC
+	if (!(sdh = bcmsdh_attach(osh, (void *)0,
+	                          (void **)&regs, irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+#else
+	if (!(sdh = bcmsdh_attach(osh, (void *)r->start,
+	                          (void **)&regs, irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* BCMLXSDMMC */
+	sdhc->sdh = sdh;
+	sdhc->oob_irq = irq;
+	sdhc->oob_flags = irq_flags;
+	sdhc->oob_irq_registered = FALSE;	/* to make sure.. */
+	sdhc->oob_irq_enable_flag = FALSE;
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&sdhc->irq_lock);
+#endif
+
+	/* chain SDIO Host Controller info together */
+	sdhc->next = sdhcinfo;
+	sdhcinfo = sdhc;
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(sdh);
+
+	/* try to attach to the target device */
+	if (!(sdhc->ch = drvinfo.attach((vendevid >> 16),
+	                                 (vendevid & 0xFFFF), 0, 0, 0, 0,
+	                                (void *)regs, NULL, sdh))) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return 0;
+
+	/* error handling */
+err:
+	if (sdhc) {
+		if (sdhc->sdh)
+			bcmsdh_detach(sdhc->osh, sdhc->sdh);
+		MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	}
+	if (osh)
+		osl_detach(osh);
+	return -ENODEV;
+}
+
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_remove(struct device *dev)
+{
+	bcmsdh_hc_t *sdhc, *prev;
+	osl_t *osh;
+
+	sdhc = sdhcinfo;
+	drvinfo.detach(sdhc->ch);
+	bcmsdh_detach(sdhc->osh, sdhc->sdh);
+	/* find the SDIO Host Controller state for this pdev and take it out from the list */
+	for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+		if (sdhc->dev == (void *)dev) {
+			if (prev)
+				prev->next = sdhc->next;
+			else
+				sdhcinfo = NULL;
+			break;
+		}
+		prev = sdhc;
+	}
+	if (!sdhc) {
+		SDLX_MSG(("%s: failed\n", __FUNCTION__));
+		return 0;
+	}
+
+
+	/* release SDIO Host Controller info */
+	osh = sdhc->osh;
+	MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	osl_detach(osh);
+
+#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY)
+	dev_set_drvdata(dev, NULL);
+#endif /* !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY) */
+
+	return 0;
+}
+
+#else /* BCMPLATFORM_BUS */
+
+#if !defined(BCMLXSDMMC)
+/* forward declarations for PCI probe and remove functions. */
+static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+	{ vendor: PCI_ANY_ID,
+	device: PCI_ANY_ID,
+	subvendor: PCI_ANY_ID,
+	subdevice: PCI_ANY_ID,
+	class: 0,
+	class_mask: 0,
+	driver_data: 0,
+	},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
+
+/**
+ * SDIO Host Controller pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+	node:		{},
+	name:		"bcmsdh",
+	id_table:	bcmsdh_pci_devid,
+	probe:		bcmsdh_pci_probe,
+	remove:		bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	save_state:	NULL,
+#endif
+	suspend:	NULL,
+	resume:		NULL,
+	};
+
+
+/*
+ * Force detection to a particular PCI slot only.  Allows for having multiple
+ * WL devices at once in a PC; only one instance of dhd will be usable at a
+ * time.  Upper word is the bus number, lower word is the slot number.  The
+ * default value of 0xffffffff turns this off.
+ */
+extern uint sd_pci_slot;
+module_param(sd_pci_slot, uint, 0);
+
+
+/**
+ * Detect supported SDIO Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported SDIO Host
+ * Controller.  If so, attach to it and attach to the target device.
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	osl_t *osh = NULL;
+	bcmsdh_hc_t *sdhc = NULL;
+	ulong regs;
+	bcmsdh_info_t *sdh = NULL;
+	int rc;
+
+	if (sd_pci_slot != 0xFFFFffff) {
+		if (pdev->bus->number != (sd_pci_slot>>16) ||
+			PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+			SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+				__FUNCTION__,
+				bcmsdh_chipmatch(pdev->vendor, pdev->device)
+				?"Found compatible SDIOHC"
+				:"Probing unknown device",
+				pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
+				pdev->device));
+			return -ENODEV;
+		}
+		SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+			__FUNCTION__,
+			bcmsdh_chipmatch(pdev->vendor, pdev->device)
+			?"Using compatible SDIOHC"
+			:"WARNING, forced use of unkown device",
+			pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
+	}
+
+	if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+	    (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+		uint32 config_reg;
+
+		SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+			goto err;
+		}
+
+		config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+		/*
+		 * Set MMC_SD_DIS bit in FlashMedia Controller.
+		 * Disabling the SD/MMC Controller in the FlashMedia Controller
+		 * allows the Standard SD Host Controller to take over control
+		 * of the SD Slot.
+		 */
+		config_reg |= 0x02;
+		OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+		osl_detach(osh);
+	}
+	/* match this pci device with what we support */
+	/* we can't solely rely on this to believe it is our SDIO Host Controller! */
+	if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+		return -ENODEV;
+	}
+
+	/* this is a pci device we might support */
+	SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+		__FUNCTION__,
+		pdev->bus->number, PCI_SLOT(pdev->devfn),
+		PCI_FUNC(pdev->devfn), pdev->irq));
+
+	/* use bcmsdh_query_device() to get the vendor ID of the target device so
+	 * it will eventually appear in the Broadcom string on the console
+	 */
+
+	/* allocate SDIO Host Controller state info */
+	if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+		SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+		SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+			__FUNCTION__,
+			MALLOCED(osh)));
+		goto err;
+	}
+	bzero(sdhc, sizeof(bcmsdh_hc_t));
+	sdhc->osh = osh;
+
+	sdhc->dev = pdev;
+
+	/* map to address where host can access */
+	pci_set_master(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0),
+	                          (void **)&regs, pdev->irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	sdhc->sdh = sdh;
+
+	/* try to attach to the target device */
+	if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
+	                                bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
+	                                (void *)regs, NULL, sdh))) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* chain SDIO Host Controller info together */
+	sdhc->next = sdhcinfo;
+	sdhcinfo = sdhc;
+
+	return 0;
+
+	/* error handling */
+err:
+	if (sdhc) {
+		if (sdhc->sdh)
+			bcmsdh_detach(sdhc->osh, sdhc->sdh);
+		MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	}
+	if (osh)
+		osl_detach(osh);
+	return -ENODEV;
+}
+
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+	bcmsdh_hc_t *sdhc, *prev;
+	osl_t *osh;
+
+	/* find the SDIO Host Controller state for this pdev and take it out from the list */
+	for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+		if (sdhc->dev == pdev) {
+			if (prev)
+				prev->next = sdhc->next;
+			else
+				sdhcinfo = sdhc->next;
+			break;
+		}
+		prev = sdhc;
+	}
+	if (!sdhc)
+		return;
+
+	drvinfo.detach(sdhc->ch);
+
+	bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+	/* release SDIO Host Controller info */
+	osh = sdhc->osh;
+	MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	osl_detach(osh);
+}
+#endif /* BCMLXSDMMC */
+#endif /* BCMPLATFORM_BUS */
+
+extern int sdio_function_init(void);
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error = 0;
+
+	drvinfo = *driver;
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+	SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
+	error = sdio_function_init();
+#else
+	SDLX_MSG(("Intel PXA270 SDIO Driver\n"));
+	error = driver_register(&bcmsdh_driver);
+#endif /* defined(BCMLXSDMMC) */
+	return error;
+#endif /* defined(BCMPLATFORM_BUS) */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (!(error = pci_module_init(&bcmsdh_pci_driver)))
+		return 0;
+#else
+	if (!(error = pci_register_driver(&bcmsdh_pci_driver)))
+		return 0;
+#endif
+
+	SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#endif /* BCMPLATFORM_BUS */
+
+	return error;
+}
+
+extern void sdio_function_cleanup(void);
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (bcmsdh_pci_driver.node.next)
+#endif
+
+#if defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+		driver_unregister(&bcmsdh_driver);
+#endif
+#if defined(BCMLXSDMMC)
+	sdio_function_cleanup();
+#endif /* BCMLXSDMMC */
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+		pci_unregister_driver(&bcmsdh_pci_driver);
+#endif /* BCMPLATFORM_BUS */
+}
+
+#if defined(OOB_INTR_ONLY)
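+/* Enable or disable the out-of-band (GPIO) WLAN interrupt; curstate tracking
+ * keeps the enable_irq()/disable_irq_nosync() calls balanced.
+ */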
+void bcmsdh_oob_intr_set(bool enable)
+{
+	static bool curstate = TRUE;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
+	if (curstate != enable) {
+		if (enable)
+			enable_irq(sdhcinfo->oob_irq);
+		else
+			disable_irq_nosync(sdhcinfo->oob_irq);
+		curstate = enable;
+	}
+	spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
+}
+
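+/* Out-of-band GPIO ISR: mask the OOB interrupt and dispatch to the DHD SDIO ISR */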
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	dhd_pub_t *dhdp;
+
+	dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev);
+
+	bcmsdh_oob_intr_set(0);
+
+	if (dhdp == NULL) {
+		SDLX_MSG(("Out of band GPIO interrupt fired way too early\n"));
+		return IRQ_HANDLED;
+	}
+
+	dhdsdio_isr((void *)dhdp->bus);
+
+	return IRQ_HANDLED;
+}
+
+int bcmsdh_register_oob_intr(void * dhdp)
+{
+	int error = 0;
+
+	SDLX_MSG(("%s Enter \n", __FUNCTION__));
+
+	/* IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */
+
+	dev_set_drvdata(sdhcinfo->dev, dhdp);
+
+	if (!sdhcinfo->oob_irq_registered) {
+		SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__,
+			(int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
+		/* Refer to customer Host IRQ docs about proper irqflags definition */
+		error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags,
+			"bcmsdh_sdmmc", NULL);
+		if (error)
+			return -ENODEV;
+
+		enable_irq_wake(sdhcinfo->oob_irq);
+		sdhcinfo->oob_irq_registered = TRUE;
+		sdhcinfo->oob_irq_enable_flag = TRUE;
+	}
+
+	return 0;
+}
+
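+/* Enable or disable the OOB interrupt and its wake capability when the
+ * requested state differs from the current one.
+ */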
+void bcmsdh_set_irq(int flag)
+{
+	if (sdhcinfo->oob_irq_registered && sdhcinfo->oob_irq_enable_flag != flag) {
+		SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag));
+		sdhcinfo->oob_irq_enable_flag = flag;
+		if (flag) {
+			enable_irq(sdhcinfo->oob_irq);
+			enable_irq_wake(sdhcinfo->oob_irq);
+		} else {
+			disable_irq_wake(sdhcinfo->oob_irq);
+			disable_irq(sdhcinfo->oob_irq);
+		}
+	}
+}
+
+void bcmsdh_unregister_oob_intr(void)
+{
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+
+	if (sdhcinfo->oob_irq_registered == TRUE) {
+		bcmsdh_set_irq(FALSE);
+		free_irq(sdhcinfo->oob_irq, NULL);
+		sdhcinfo->oob_irq_registered = FALSE;
+	}
+}
+#endif /* defined(OOB_INTR_ONLY) */
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
new file mode 100644
index 0000000..8c328b8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -0,0 +1,1315 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c,v 1.14.64.3 2010-12-23 01:13:15 Exp $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <sdioh.h>	/* SDIO Host Controller Specification */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = 512;		/* Default blocksize */
+
+uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK	0x03
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
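+/* Read the card's common and per-function CIS pointers and enable SDIO function 1 */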
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Enable Function 1 */
+	sdio_claim_host(gInstance->func[1]);
+	err_ret = sdio_enable_func(gInstance->func[1]);
+	sdio_release_host(gInstance->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+	}
+
+	return FALSE;
+}
+
+/*
+ *	Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (gInstance == NULL) {
+		sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (sdioh_sdmmc_osinit(sd) != 0) {
+		sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+
+	gInstance->sd = sd;
+
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[1]);
+
+	sd->client_block_size[1] = 64;
+	err_ret = sdio_set_block_size(gInstance->func[1], 64);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+	}
+
+	/* Release host controller F1 */
+	sdio_release_host(gInstance->func[1]);
+
+	if (gInstance->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(gInstance->func[2]);
+
+		sd->client_block_size[2] = sd_f2_blocksize;
+		err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
+		if (err_ret) {
+			sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
+				sd_f2_blocksize));
+		}
+
+		/* Release host controller F2 */
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	sdioh_sdmmc_card_enablefuncs(sd);
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		sdio_claim_host(gInstance->func[2]);
+		sdio_disable_func(gInstance->func[2]);
+		sdio_release_host(gInstance->func[2]);
+
+		/* Disable Function 1 */
+		sdio_claim_host(gInstance->func[1]);
+		sdio_disable_func(gInstance->func[1]);
+		sdio_release_host(gInstance->func[1]);
+
+		/* deregister irq */
+		sdioh_sdmmc_osfree(sd);
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(void)
+{
+	uint8 reg;
+	int err;
+
+	if (gInstance->func[0]) {
+		sdio_claim_host(gInstance->func[0]);
+
+		reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+		if (err) {
+			sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			sdio_release_host(gInstance->func[0]);
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* Enable F1 and F2 interrupts, set master enable */
+		reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);
+
+		sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+		sdio_release_host(gInstance->func[0]);
+
+		if (err) {
+			sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(void)
+{
+	uint8 reg;
+	int err;
+
+	if (gInstance->func[0]) {
+		sdio_claim_host(gInstance->func[0]);
+		reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+		if (err) {
+			sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			sdio_release_host(gInstance->func[0]);
+			return SDIOH_API_RC_FAIL;
+		}
+
+		reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+		/* Disable master interrupt with the last function interrupt */
+		if (!(reg & 0xFE))
+			reg = 0;
+		sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+
+		sdio_release_host(gInstance->func[0]);
+		if (err) {
+			sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (gInstance->func[2]) {
+		sdio_claim_host(gInstance->func[2]);
+		sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	if (gInstance->func[1]) {
+		sdio_claim_host(gInstance->func[1]);
+		sdio_claim_irq(gInstance->func[1], IRQHandler);
+		sdio_release_host(gInstance->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (gInstance->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(gInstance->func[1]);
+		sdio_release_irq(gInstance->func[1]);
+		sdio_release_host(gInstance->func[1]);
+	}
+
+	if (gInstance->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(gInstance->func[2]);
+		sdio_release_irq(gInstance->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	sdioh_disable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints", 	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG, 	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode", 	IOV_SDMODE, 	0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0 },
+	{"sd_rxchain",  IOV_RXCHAIN,    0, 	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                           void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode work with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = FALSE;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+		else if (sd_ptr->offset & 2)
+			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+		else
+			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable)
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;	/* enable hw oob interrupt */
+	else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	/* Needed for Android Linux Kernel 2.6.35 */
+	data |= SDIO_SEPINT_ACT_HI; 		/* Active HIGH */
+#endif
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret;
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if (rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers.  Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+				if (gInstance->func[2]) {
+					sdio_claim_host(gInstance->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(gInstance->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(gInstance->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+								err_ret));
+						}
+					}
+					sdio_release_host(gInstance->func[2]);
+				}
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				sdio_claim_host(gInstance->func[func]);
+				/*
+				 * This sdio_writeb() may need to be replaced with another API
+				 * depending on MMC driver changes; as of now it is a temporary
+				 * workaround.
+				 */
+				sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(gInstance->func[func]);
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				sdio_claim_host(gInstance->func[func]);
+				sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(gInstance->func[func]);
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			sdio_claim_host(gInstance->func[func]);
+			sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+			sdio_release_host(gInstance->func[func]);
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		sdio_claim_host(gInstance->func[func]);
+
+		if (func == 0) {
+			*byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
+		} else {
+			*byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
+		}
+
+		sdio_release_host(gInstance->func[func]);
+	}
+
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+		                        rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                                   uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[func]);
+
+	if (rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(gInstance->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(gInstance->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(gInstance->func[func]);
+
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+		                        rw ? "Write" : "Read", err_ret));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	uint32	SGCount = 0;
+	int err_ret = 0;
+
+	void *pnext;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[func]);
+	for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+		uint pkt_len = PKTLEN(sd->osh, pnext);
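+		/* Round the transfer length up to a 4-byte boundary */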
+		pkt_len += 3;
+		pkt_len &= 0xFFFFFFFC;
+
+#ifdef CONFIG_MMC_MSM7X00A
+		if ((pkt_len % 64) == 32) {
+			sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
+			pkt_len += 32;
+		}
+#endif /* CONFIG_MMC_MSM7X00A */
+		/* Make sure the packet is aligned properly. If it isn't, then this
+		 * is the fault of sdioh_request_buffer() which is supposed to give
+		 * us something we can work with.
+		 */
+		ASSERT(((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) == 0);
+
+		if ((write) && (!fifo)) {
+			err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				pkt_len);
+		} else if (write) {
+			err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				pkt_len);
+		} else if (fifo) {
+			err_ret = sdio_readsb(gInstance->func[func],
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				addr,
+				pkt_len);
+		} else {
+			err_ret = sdio_memcpy_fromio(gInstance->func[func],
+				((uint8*)PKTDATA(sd->osh, pnext)),
+				addr,
+				pkt_len);
+		}
+
+		if (err_ret) {
+			sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+				__FUNCTION__,
+				(write) ? "TX" : "RX",
+				pnext, SGCount, addr, pkt_len, err_ret));
+		} else {
+			sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+				__FUNCTION__,
+				(write) ? "TX" : "RX",
+				pnext, SGCount, addr, pkt_len));
+		}
+
+		if (!fifo) {
+			addr += pkt_len;
+		}
+		SGCount++;
+
+	}
+
+	/* Release host controller */
+	sdio_release_host(gInstance->func[func]);
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
+ * then all the packets in the chain must be properly aligned.  If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC Status;
+	void *mypkt = NULL;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Case 1: we don't have a packet. */
+	if (pkt == NULL) {
+		sd_data(("%s: Creating new %s Packet, len=%d\n",
+		         __FUNCTION__, write ? "TX" : "RX", buflen_u));
+#ifdef DHD_USE_STATIC_BUF
+		if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#else
+		if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+			sd_err(("%s: PKTGET failed: len %d\n",
+			           __FUNCTION__, buflen_u));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* For a write, copy the buffer data into the packet. */
+		if (write) {
+			bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
+		}
+
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+		/* For a read, copy the packet data back to the buffer. */
+		if (!write) {
+			bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
+		}
+#ifdef DHD_USE_STATIC_BUF
+		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+	} else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
+		/* Case 2: We have a packet, but it is unaligned. */
+
+		/* In this case, we cannot have a chain. */
+		ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+
+		sd_data(("%s: Creating aligned %s Packet, len=%d\n",
+		         __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
+#ifdef DHD_USE_STATIC_BUF
+		if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#else
+		if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#endif /* DHD_USE_STATIC_BUF */
+			sd_err(("%s: PKTGET failed: len %d\n",
+			           __FUNCTION__, PKTLEN(sd->osh, pkt)));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* For a write, copy the buffer data into the packet. */
+		if (write) {
+			bcopy(PKTDATA(sd->osh, pkt),
+			      PKTDATA(sd->osh, mypkt),
+			      PKTLEN(sd->osh, pkt));
+		}
+
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+		/* For a read, copy the packet data back to the buffer. */
+		if (!write) {
+			bcopy(PKTDATA(sd->osh, mypkt),
+			      PKTDATA(sd->osh, pkt),
+			      PKTLEN(sd->osh, mypkt));
+		}
+#ifdef DHD_USE_STATIC_BUF
+		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* DHD_USE_STATIC_BUF */
+	} else { /* case 3: We have a packet and it is aligned. */
+		sd_data(("%s: Aligned %s Packet, direct DMA\n",
+		         __FUNCTION__, write ? "Tx" : "Rx"));
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
+	}
+
+	return (Status);
+}
+
+/* this function performs "abort" for both of host & device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
+	sd = gInstance->sd;
+
+	ASSERT(sd != NULL);
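+	/* The MMC core holds the host claimed while this handler runs; release it
+	 * so the client interrupt handler can issue its own SDIO requests, then
+	 * re-claim before returning.
+	 */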
+	sdio_release_host(gInstance->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(gInstance->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+
+	sd = gInstance->sd;
+
+	ASSERT(sd != NULL);
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *si, int stage)
+{
+	int ret;
+	sdioh_info_t *sd = gInstance->sd;
+
+	/* This must be done in stages: the interrupt cannot be enabled until
+	 * firmware download is complete, otherwise polled SDIO accesses
+	 * would get in the way.
+	 */
+	if (gInstance->func[0]) {
+			if (stage == 0) {
+		/* Since power to the chip was cut, the device has to be
+		 * re-enumerated. Set the block sizes and enable function 1
+		 * in preparation for downloading the firmware.
+		 */
+		/* sdio_reset_comm() has been fixed in the latest kernel/msm.git for Linux
+		 * 2.6.27; the implementation prior to that is buggy and needs Broadcom's
+		 * patch for it.
+		 */
+		if ((ret = sdio_reset_comm(gInstance->func[0]->card)))
+			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+		else {
+			sd->num_funcs = 2;
+			sd->sd_blockmode = TRUE;
+			sd->use_client_ints = TRUE;
+			sd->client_block_size[0] = 64;
+
+			/* Claim host controller */
+			sdio_claim_host(gInstance->func[1]);
+
+			sd->client_block_size[1] = 64;
+			if (sdio_set_block_size(gInstance->func[1], 64)) {
+				sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+			}
+
+			/* Release host controller F1 */
+			sdio_release_host(gInstance->func[1]);
+
+			if (gInstance->func[2]) {
+				/* Claim host controller F2 */
+				sdio_claim_host(gInstance->func[2]);
+
+				sd->client_block_size[2] = sd_f2_blocksize;
+				if (sdio_set_block_size(gInstance->func[2],
+					sd_f2_blocksize)) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+						"blocksize to %d\n", sd_f2_blocksize));
+				}
+
+				/* Release host controller F2 */
+				sdio_release_host(gInstance->func[2]);
+			}
+
+			sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(gInstance->func[0]);
+			sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+			sdio_claim_irq(gInstance->func[1], IRQHandler);
+			sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr();
+#endif
+			bcmsdh_oob_intr_set(TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *si)
+{
+	/* The MSM7201A Android SDIO stack has an interrupt bug: internally it
+	 * polls, which causes problems when the device is turned off. Unregister
+	 * the interrupt with the SDIO stack to stop the polling.
+	 */
+	if (gInstance->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(gInstance->func[0]);
+		sdio_release_irq(gInstance->func[1]);
+		sdio_release_irq(gInstance->func[2]);
+		sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr();
+#endif
+		bcmsdh_oob_intr_set(FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+	return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
new file mode 100644
index 0000000..c73e04c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,279 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c,v 1.8.6.2 2011-02-01 18:38:36 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h>	/* SDIO Specs */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM		0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT	0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB	0x0492	/* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325	0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319	0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+
+#include <bcmsdh_sdmmc.h>
+
+#include <dhd_dbg.h>
+
+#ifdef WL_CFG80211
+extern void wl_cfg80211_set_sdio_func(void *func);
+#endif
+
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+PBCMSDH_SDMMC_INSTANCE gInstance;
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+	static struct sdio_func sdio_func_0;
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_trace(("sdio_device: 0x%04x\n", func->device));
+	sd_trace(("Function#: 0x%04x\n", func->num));
+
+	if (func->num == 1) {
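+		/* The MMC core does not expose function 0 as a struct sdio_func, so
+		 * build a minimal one sharing this card for F0 register accesses.
+		 */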
+		sdio_func_0.num = 0;
+		sdio_func_0.card = func->card;
+		gInstance->func[0] = &sdio_func_0;
+		if (func->device == 0x4) { /* 4318 */
+			gInstance->func[2] = NULL;
+			sd_trace(("NIC found, calling bcmsdh_probe...\n"));
+			ret = bcmsdh_probe(&func->dev);
+		}
+	}
+
+	gInstance->func[func->num] = func;
+
+	if (func->num == 2) {
+#ifdef WL_CFG80211
+		wl_cfg80211_set_sdio_func(func);
+#endif
+		sd_trace(("F2 found, calling bcmsdh_probe...\n"));
+		ret = bcmsdh_probe(&func->dev);
+	}
+
+	return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	if (func->num == 2) {
+		sd_trace(("F2 found, calling bcmsdh_remove...\n"));
+		bcmsdh_remove(&func->dev);
+	}
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_NONE)		},
+	{ /* end: all zeroes */				},
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+	.probe		= bcmsdh_sdmmc_probe,
+	.remove		= bcmsdh_sdmmc_remove,
+	.name		= "bcmsdh_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+	};
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+};
+
+
+int
+sdioh_sdmmc_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+	sd->sdos_info = (void*)sdos;
+	if (sdos == NULL)
+		return BCME_NOMEM;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+	return BCME_OK;
+}
+
+void
+sdioh_sdmmc_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+#if !defined(OOB_INTR_ONLY)
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+	if (enable) {
+		sdioh_sdmmc_devintr_on(sd);
+	} else {
+		sdioh_sdmmc_devintr_off(sd);
+	}
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+	int error = 0;
+	sdio_function_init();
+	return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int sdio_function_init(void)
+{
+	int error = 0;
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+
+	gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
+	if (!gInstance)
+		return -ENOMEM;
+
+	error = sdio_register_driver(&bcmsdh_sdmmc_driver);
+
+
+	return error;
+}
+
+/*
+ * module cleanup
+*/
+extern int bcmsdh_remove(struct device *dev);
+void sdio_function_cleanup(void)
+{
+	sd_trace(("%s Enter\n", __FUNCTION__));
+
+
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+
+	if (gInstance)
+		kfree(gInstance);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c
new file mode 100644
index 0000000..fbdd7cd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -0,0 +1,1967 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c,v 1.277.2.18 2011-01-26 02:32:08 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+void *_bcmutils_dummy_fn = NULL;
+
+#ifdef BCMDRIVER
+
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	if (len < 0)
+		len = 4096; /* "infinite" */
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(PKTDATA(osh, p) + offset, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(buf, PKTDATA(osh, p) + offset, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+	uint total;
+
+	total = 0;
+	for (; p; p = PKTNEXT(osh, p))
+		total += PKTLEN(osh, p);
+	return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+	for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+		;
+
+	return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+	uint cnt;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p))
+		cnt++;
+
+	return cnt;
+}
+
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, p);
+	else
+		q->head = p;
+
+	q->tail = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head == NULL)
+		q->tail = p;
+
+	PKTSETLINK(p, q->head);
+	q->head = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
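+/* Dequeue the packet at the head of the given precedence queue (NULL if empty) */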
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	return p;
+}
+
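+/* Flush one precedence queue, freeing each packet for which the optional
+ * filter callback returns true (or every packet if no callback is given).
+ */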
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	q = &pq->q[prec];
+	p = q->head;
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			bool head = (p == q->head);
+			if (head)
+				q->head = PKTLINK(p);
+			else
+				PKTSETLINK(prev, PKTLINK(p));
+			PKTSETLINK(p, NULL);
+			PKTFREE(osh, p, dir);
+			q->len--;
+			pq->len--;
+			p = (head ? q->head : PKTLINK(prev));
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+
+	if (q->head == NULL) {
+		ASSERT(q->len == 0);
+		q->tail = NULL;
+	}
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	if (!pktbuf)
+		return FALSE;
+
+	q = &pq->q[prec];
+
+	if (q->head == pktbuf) {
+		if ((q->head = PKTLINK(pktbuf)) == NULL)
+			q->tail = NULL;
+	} else {
+		for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+			;
+		if (p == NULL)
+			return FALSE;
+
+		PKTSETLINK(p, PKTLINK(pktbuf));
+		if (q->tail == pktbuf)
+			q->tail = p;
+	}
+
+	q->len--;
+	pq->len--;
+	PKTSETLINK(pktbuf, NULL);
+	return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+	int prec;
+
+	ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+	/* pq is variable size; only zero out what's requested */
+	bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+	pq->num_prec = (uint16)num_prec;
+
+	pq->max = (uint16)max_len;
+
+	for (prec = 0; prec < num_prec; prec++)
+		pq->q[prec].max = pq->max;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+	int prec;
+	for (prec = 0; prec < pq->num_prec; prec++)
+		pktq_pflush(osh, pq, prec, dir, fn, arg);
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+	int prec, len;
+
+	len = 0;
+
+	for (prec = 0; prec <= pq->hi_prec; prec++)
+		if (prec_bmp & (1 << prec))
+			len += pq->q[prec].len;
+
+	return len;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
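+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: the expected call pattern
+ * for the multi-precedence queue above. Assumes 'p' is a single (unchained)
+ * packet from the OSL pool and 'prec' is a valid precedence index.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static void
+pktq_usage_sketch(osl_t *osh, void *p, int prec)
+{
+	struct pktq q;
+	int prec_out;
+	void *deq;
+
+	pktq_init(&q, PKTQ_MAX_PREC, 128);	/* all precedences, 128-packet limit */
+
+	if (!pktq_pfull(&q, prec))
+		pktq_penq(&q, prec, p);		/* tail-enqueue at 'prec' */
+
+	/* dequeue from the highest non-empty precedence */
+	if ((deq = pktq_deq(&q, &prec_out)) != NULL)
+		PKTFREE(osh, deq, FALSE);
+}
+#endif /* BCMUTILS_USAGE_SKETCH */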
+
+#endif /* BCMDRIVER */
+
+const unsigned char bcm_ctype[] = {
+
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,            /* 0-7 */
+	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+	_BCM_C, /* 8-15 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,            /* 16-23 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,            /* 24-31 */
+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,        /* 32-39 */
+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,            /* 40-47 */
+	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,            /* 48-55 */
+	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,            /* 56-63 */
+	_BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+	_BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,            /* 72-79 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,            /* 80-87 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,            /* 88-95 */
+	_BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+	_BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,     /* 128-143 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,     /* 144-159 */
+	_BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(char *cp, char **endp, uint base)
+{
+	ulong result, last_result = 0, value;
+	bool minus;
+
+	minus = FALSE;
+
+	while (bcm_isspace(*cp))
+		cp++;
+
+	if (cp[0] == '+')
+		cp++;
+	else if (cp[0] == '-') {
+		minus = TRUE;
+		cp++;
+	}
+
+	if (base == 0) {
+		if (cp[0] == '0') {
+			if ((cp[1] == 'x') || (cp[1] == 'X')) {
+				base = 16;
+				cp = &cp[2];
+			} else {
+				base = 8;
+				cp = &cp[1];
+			}
+		} else
+			base = 10;
+	} else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+		cp = &cp[2];
+	}
+
+	result = 0;
+
+	while (bcm_isxdigit(*cp) &&
+		(value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base)
+	{
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus)
+			return (ulong)-1;
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = (char *)cp;
+
+	return (result);
+}
+
+int
+bcm_atoi(char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char*
+bcmstrstr(char *haystack, char *needle)
+{
+	int len, nlen;
+	int i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return (haystack);
+
+	nlen = strlen(needle);
+	len = strlen(haystack) - nlen + 1;
+
+	for (i = 0; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return (&haystack[i]);
+	return (NULL);
+}
+
+char*
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char*
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+*  but allows bcmstrtok() to be used on different strings or by different callers
+*  at the same time. Each call modifies '*string' by substituting a NUL character
+*  for the first delimiter that is encountered, and updates 'string' to point to
+*  the char after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+	 */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
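+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: walking a comma/space
+ * separated list with bcmstrtok(). The parse pointer 'cursor' is advanced on
+ * every call, so the buffer is consumed incrementally.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static int
+bcmstrtok_usage_sketch(void)
+{
+	char line[] = "chan=6, mode=11n rate=54";
+	char *cursor = line;
+	char *tok;
+	char delim;
+	int ntok = 0;
+
+	/* yields "chan=6", "mode=11n", "rate=54"; 'delim' holds the character
+	 * that terminated each token ('\0' for the last one)
+	 */
+	while ((tok = bcmstrtok(&cursor, ", ", &delim)) != NULL)
+		ntok++;
+
+	return ntok;	/* 3 */
+}
+#endif /* BCMUTILS_USAGE_SKETCH */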
+
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare two strings case-insensitively, up to a maximum of
+*             'cnt' characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(char *p, struct ether_addr *ea)
+{
+	int i = 0;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &p, 16);
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but the return value is the count
+ * of bytes copied. The minimum number of bytes copied is the NUL
+ * terminator: 1 byte (char) or 2 bytes (wchar).
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+	ulong copyct = 1;
+	ushort i;
+
+	if (abuflen == 0)
+		return 0;
+
+	/* wbuflen is in bytes */
+	wbuflen /= sizeof(ushort);
+
+	for (i = 0; i < wbuflen; ++i) {
+		if (--abuflen == 0)
+			break;
+		*abuf++ = (char) *wbuf++;
+		++copyct;
+	}
+	*abuf = '\0';
+
+	return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+	static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x";
+	snprintf(buf, 18, template,
+		ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff,
+		ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff);
+	return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+	snprintf(buf, 16, "%d.%d.%d.%d",
+		ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+	return (buf);
+}
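+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: round-trip a MAC address
+ * through bcm_ether_atoe() and bcm_ether_ntoa(). 'out18' must point to at
+ * least 18 bytes ("xx:xx:xx:xx:xx:xx" plus the terminating NUL).
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static int
+ether_addr_usage_sketch(char *out18)
+{
+	struct ether_addr ea;
+	char macstr[] = "00:11:22:33:44:55";
+
+	if (!bcm_ether_atoe(macstr, &ea))
+		return 0;			/* malformed address string */
+
+	bcm_ether_ntoa(&ea, out18);		/* formats "00:11:22:33:44:55" again */
+	return 1;
+}
+#endif /* BCMUTILS_USAGE_SKETCH */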
+
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+	uint i;
+
+	for (i = 0; i < ms; i++) {
+		OSL_DELAY(1000);
+	}
+}
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+	void *p;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	for (p = p0; p; p = PKTNEXT(osh, p))
+		prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif  
+
+/* Takes an Ethernet frame and sets the out-of-band PKTPRIO.
+ * Also updates the in-place VLAN tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata;
+	int priority = 0;
+	int rc = 0;
+
+	pktdata = (uint8 *) PKTDATA(NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+	eh = (struct ether_header *) pktdata;
+
+	if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) {
+		uint16 vlan_tag;
+		int vlan_prio, dscp_prio = 0;
+
+		evh = (struct ethervlan_header *)eh;
+
+		vlan_tag = ntoh16(evh->vlan_tag);
+		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+		if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) {
+			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+			uint8 tos_tc = IP_TOS46(ip_body);
+			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		}
+
+		/* DSCP priority gets precedence over 802.1P (vlan tag) */
+		if (dscp_prio != 0) {
+			priority = dscp_prio;
+			rc |= PKTPRIO_VDSCP;
+		} else {
+			priority = vlan_prio;
+			rc |= PKTPRIO_VLAN;
+		}
+		/*
+		 * If the DSCP priority is not the same as the VLAN priority,
+		 * then overwrite the priority field in the vlan tag with the
+		 * DSCP priority value. This is required for Linux APs because
+		 * the VLAN driver on Linux overwrites the skb->priority field
+		 * with the priority value in the vlan tag.
+		 */
+		if (update_vtag && (priority != vlan_prio)) {
+			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+			evh->vlan_tag = hton16(vlan_tag);
+			rc |= PKTPRIO_UPD;
+		}
+	} else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+		uint8 *ip_body = pktdata + sizeof(struct ether_header);
+		uint8 tos_tc = IP_TOS46(ip_body);
+		priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		rc |= PKTPRIO_DSCP;
+	}
+
+	ASSERT(priority >= 0 && priority <= MAXPRIO);
+	PKTSETPRIO(pkt, priority);
+	return (rc | priority);
+}
+
+#ifndef BCM_BOOTLOADER
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert an error code into its related error string */
+const char *
+bcmerrorstr(int bcmerror)
+{
+	/* check if someone added a bcmerror code but forgot to add errorstring */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+	if (bcmerror > 0 || bcmerror < BCME_LAST) {
+		snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+		return bcm_undeferrstr;
+	}
+
+	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+	return bcmerrorstrtable[-bcmerror];
+}
+
+#endif /* !BCM_BOOTLOADER */
+
+
+
+/* iovar table lookup */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+	const bcm_iovar_t *vi;
+	const char *lookup_name;
+
+	/* skip any ':' delimited option prefixes */
+	lookup_name = strrchr(name, ':');
+	if (lookup_name != NULL)
+		lookup_name++;
+	else
+		lookup_name = name;
+
+	ASSERT(table != NULL);
+
+	for (vi = table; vi->name; vi++) {
+		if (!strcmp(vi->name, lookup_name))
+			return vi;
+	}
+	/* ran to end of table */
+
+	return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+
+	/* length check on io buf */
+	switch (vi->type) {
+	case IOVT_BOOL:
+	case IOVT_INT8:
+	case IOVT_INT16:
+	case IOVT_INT32:
+	case IOVT_UINT8:
+	case IOVT_UINT16:
+	case IOVT_UINT32:
+		/* all integers are int32 sized args at the ioctl interface */
+		if (len < (int)sizeof(int)) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_BUFFER:
+		/* buffer must meet minimum length requirement */
+		if (len < vi->minlen) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_VOID:
+		if (!set) {
+			/* Cannot return nil... */
+			bcmerror = BCME_UNSUPPORTED;
+		} else if (len) {
+			/* Set is an action w/o parameters */
+			bcmerror = BCME_BUFTOOLONG;
+		}
+		break;
+
+	default:
+		/* unknown type for length check in iovar info */
+		ASSERT(0);
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#endif  /* BCMDRIVER */
+
+
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ *       x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+	0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+	0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+	0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+	0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+	0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+	0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+	0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+	0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+	0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+	0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+	0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+	0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+	0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+	0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+	0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+	0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+	0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+	0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+	0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+	0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+	0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+	0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+	0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+	0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+	0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+	0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+	0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+	0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+	0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+	0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+	0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+	0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+	(c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+	uint8 *pdata,   /* pointer to array of data to process */
+	uint  nbytes,   /* number of input data bytes to process */
+	uint8 crc   /* either CRC8_INIT_VALUE or previous return value */
+)
+{
+	/* hard code the crc loop instead of using CRC_INNER_LOOP macro
+	 * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+	 */
+	while (nbytes-- > 0)
+		crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
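+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: generate and verify a
+ * CRC-8 following the protocol in the header comment above. Assumes 'frame'
+ * has room for one extra CRC byte after the 'body_len' data bytes.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static bool
+crc8_usage_sketch(uint8 *frame, uint body_len)
+{
+	uint8 crc;
+
+	/* generate: crc over the body, complement it, store it after the body */
+	crc = hndcrc8(frame, body_len, CRC8_INIT_VALUE);
+	frame[body_len] = (uint8)~crc;
+
+	/* check: crc over body plus stored crc byte must yield the magic value */
+	crc = hndcrc8(frame, body_len + 1, CRC8_INIT_VALUE);
+	return (crc == CRC8_GOOD_VALUE);
+}
+#endif /* BCMUTILS_USAGE_SKETCH */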
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ *       x^16 + x^12 + x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+	0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+	0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+	0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+	0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+	0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+	0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+	0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+	0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+	0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+	0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+	0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+	0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+	0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+	0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+	0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+	0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+	0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+	0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+	0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+	0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+	0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+	0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+	0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+	0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+	0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+	0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+	0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+	0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+	0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+	0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+	0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+	0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+	uint8 *pdata,  /* pointer to array of data to process */
+	uint nbytes, /* number of input data bytes to process */
+	uint16 crc     /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+	while (nbytes-- > 0)
+		CRC_INNER_LOOP(16, crc, *pdata++);
+	return crc;
+}
+
+static const uint32 crc32_table[256] = {
+	0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+	0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+	0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+	0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+	0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+	0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+	0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+	0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+	0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+	0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+	0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+	0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+	0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+	0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+	0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+	0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+	0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+	0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+	0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+	0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+	0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+	0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+	0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+	0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+	0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+	0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+	0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+	0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+	0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+	0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+	0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+	0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+	0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+	0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+	0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+	0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+	0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+	0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+	0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+	0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+	0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+	0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+	0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+	0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+	0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+	0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+	0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+	0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+	0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+	0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+	0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+	0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+	0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+	0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+	0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+	0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+	0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+	0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+	0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+	0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+	0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+	0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+	0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+	0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+	uint8 *pend;
+#ifdef __mips__
+	uint8 tmp[4];
+	ulong *tptr = (ulong *)tmp;
+
+	/* in case the beginning of the buffer isn't aligned */
+	pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc);
+	nbytes -= (pend - pdata);
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+
+	/* handle bulk of data as 32-bit words */
+	pend = pdata + (nbytes & 0xfffffffc);
+	while (pdata < pend) {
+		*tptr = *(ulong *)pdata;
+		pdata += sizeof(ulong *);
+		CRC_INNER_LOOP(32, crc, tmp[0]);
+		CRC_INNER_LOOP(32, crc, tmp[1]);
+		CRC_INNER_LOOP(32, crc, tmp[2]);
+		CRC_INNER_LOOP(32, crc, tmp[3]);
+	}
+
+	/* 1-3 bytes at end of buffer */
+	pend = pdata + (nbytes & 0x03);
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+#else
+	pend = pdata + nbytes;
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+#endif /* __mips__ */
+
+	return crc;
+}
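+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: accumulating a CRC-32
+ * over two discontiguous buffers by feeding the previous return value back in
+ * as the running crc, per the comment above.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static uint32
+crc32_accumulate_sketch(uint8 *hdr, uint hdr_len, uint8 *payload, uint payload_len)
+{
+	uint32 crc;
+
+	crc = hndcrc32(hdr, hdr_len, CRC32_INIT_VALUE);	/* fresh start */
+	crc = hndcrc32(payload, payload_len, crc);	/* continue where 'hdr' left off */
+	return crc;
+}
+#endif /* BCMUTILS_USAGE_SKETCH */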
+
+#ifdef notdef
+#define CLEN    1499    /*  CRC Length */
+#define CBUFSIZ     (CLEN+4)
+#define CNBUFS      5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+	uint j, k, l;
+	uint8 *buf;
+	uint len[CNBUFS];
+	uint32 crcr;
+	uint32 crc32tv[CNBUFS] =
+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+	/* step through all possible alignments */
+	for (l = 0; l <= 4; l++) {
+		for (j = 0; j < CNBUFS; j++) {
+			len[j] = CLEN;
+			for (k = 0; k < len[j]; k++)
+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+		}
+
+		for (j = 0; j < CNBUFS; j++) {
+			crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+			ASSERT(crcr == crc32tv[j]);
+		}
+	}
+
+	MFREE(buf, CBUFSIZ*CNBUFS);
+	return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+	int len;
+
+	/* validate current elt */
+	if (!bcm_valid_tlv(elt, *buflen))
+		return NULL;
+
+	/* advance to next elt */
+	len = elt->len;
+	elt = (bcm_tlv_t*)(elt->data + len);
+	*buflen -= (2 + len);
+
+	/* validate next elt */
+	if (!bcm_valid_tlv(elt, *buflen))
+		return NULL;
+
+	return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= 2) {
+		int len = elt->len;
+
+		/* validate remaining totlen */
+		if ((elt->id == key) && (totlen >= (len + 2)))
+			return (elt);
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+		totlen -= (len + 2);
+	}
+
+	return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.  Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= 2) {
+		uint id = elt->id;
+		int len = elt->len;
+
+		/* Punt if we start seeing IDs greater than the target key */
+		if (id > key)
+			return (NULL);
+
+		/* validate remaining totlen */
+		if ((id == key) && (totlen >= (len + 2)))
+			return (elt);
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+		totlen -= (len + 2);
+	}
+	return NULL;
+}
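+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: look up one tag with
+ * bcm_parse_tlvs() and walk every element with bcm_next_tlv(). The tag value
+ * 0xDD (vendor-specific IE) is only an example.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static int
+tlv_walk_sketch(uint8 *ies, int ies_len)
+{
+	bcm_tlv_t *elt;
+	int buflen = ies_len;
+	int count = 0;
+
+	/* direct lookup of a single tag */
+	if (bcm_parse_tlvs(ies, ies_len, 0xDD) != NULL)
+		count++;
+
+	/* full walk: bcm_next_tlv() decrements 'buflen' as it advances */
+	elt = (bcm_tlv_t *)ies;
+	if (bcm_valid_tlv(elt, buflen)) {
+		do {
+			count++;
+		} while ((elt = bcm_next_tlv(elt, &buflen)) != NULL);
+	}
+
+	return count;
+}
+#endif /* BCMUTILS_USAGE_SKETCH */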
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+	defined(DHD_DEBUG)
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+	int i;
+	char* p = buf;
+	char hexstr[16];
+	int slen = 0, nlen = 0;
+	uint32 bit;
+	const char* name;
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; flags != 0; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (bit == 0 && flags != 0) {
+			/* print any unnamed bits */
+			snprintf(hexstr, 16, "0x%X", flags);
+			name = hexstr;
+			flags = 0;  /* exit loop */
+		} else if ((flags & bit) == 0)
+			continue;
+		flags &= ~bit;
+		nlen = strlen(name);
+		slen += nlen;
+		/* count btwn flag space */
+		if (flags != 0)
+			slen += 1;
+		/* need NULL char as well */
+		if (len <= slen)
+			break;
+		/* copy NULL char but don't count it */
+		strncpy(p, name, nlen + 1);
+		p += nlen;
+		/* copy btwn flag space and NULL char */
+		if (flags != 0)
+			p += snprintf(p, 2, " ");
+		len -= slen;
+	}
+
+	/* indicate the str was too short */
+	if (flags != 0) {
+		if (len < 2)
+			p -= 2 - len;   /* overwrite last char */
+		p += snprintf(p, 2, ">");
+	}
+
+	return (int)(p - buf);
+}
+#endif 
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+	defined(DHD_DEBUG) || defined(WLMEDIA_PEAKRATE)
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+	int i;
+	char *p = str;
+	const uint8 *src = (const uint8*)bytes;
+
+	for (i = 0; i < len; i++) {
+		p += snprintf(p, 3, "%02X", *src);
+		src++;
+	}
+	return (int)(p - str);
+}
+#endif 
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+	char line[128], *p;
+	int len = sizeof(line);
+	int nchar;
+	uint i;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	p = line;
+	for (i = 0; i < nbytes; i++) {
+		if (i % 16 == 0) {
+			nchar = snprintf(p, len, "  %04d: ", i);    /* line prefix */
+			p += nchar;
+			len -= nchar;
+		}
+		if (len > 0) {
+			nchar = snprintf(p, len, "%02x ", buf[i]);
+			p += nchar;
+			len -= nchar;
+		}
+
+		if (i % 16 == 15) {
+			printf("%s\n", line);       /* flush line */
+			p = line;
+			len = sizeof(line);
+		}
+	}
+
+	/* flush last partial line */
+	if (p != line)
+		printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+	"NONE",
+	"WEP1",
+	"TKIP",
+	"WEP128",
+	"AES_CCM",
+	"AES_OCB_MSDU",
+	"AES_OCB_MPDU",
+	"NALG",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+	return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+	if (brev < 0x100)
+		snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+	else
+		snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+	return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+	uint len, max_len;
+	char c;
+
+	len = strlen(buf);
+
+	max_len = BUFSIZE_TODUMP_ATONCE;
+
+	while (len > max_len) {
+		c = buf[max_len];
+		buf[max_len] = '\0';
+		printf("%s", buf);
+		buf[max_len] = c;
+
+		buf += max_len;
+		len -= max_len;
+	}
+	/* print the remaining string */
+	printf("%s\n", buf);
+	return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+	char *buf, uint32 bufsize)
+{
+	uint  filled_len;
+	int len;
+	struct fielddesc *cur_ptr;
+
+	filled_len = 0;
+	cur_ptr = fielddesc_array;
+
+	while (bufsize > 1) {
+		if (cur_ptr->nameandfmt == NULL)
+			break;
+		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+			read_rtn(arg0, arg1, cur_ptr->offset));
+		/* check for snprintf overflow or error */
+		if (len < 0 || (uint32)len >= bufsize)
+			len = bufsize - 1;
+		buf += len;
+		bufsize -= len;
+		filled_len += len;
+		cur_ptr++;
+	}
+	return filled_len;
+}
+
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+	uint len;
+
+	len = strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	strncpy(buf, name, buflen);
+
+	/* append data onto the end of the name string */
+	memcpy(&buf[len], data, datalen);
+	len += datalen;
+
+	return len;
+}
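+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: packing an iovar name
+ * plus a 32-bit value into an ioctl buffer. The iovar name "mpc" is only an
+ * example; a return of 0 means the name and data did not fit in 'buf'.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static uint
+mkiovar_usage_sketch(char *buf, uint buflen)
+{
+	char name[] = "mpc";
+	int val = 1;
+	uint len;
+
+	len = bcm_mkiovar(name, (char *)&val, sizeof(val), buf, buflen);
+	if (len == 0)
+		return 0;	/* buffer too small for "mpc\0" plus 4 data bytes */
+
+	/* 'buf' now holds the NUL-terminated name followed by the raw value;
+	 * 'len' is the number of bytes to hand to the ioctl/iovar path
+	 */
+	return len;
+}
+#endif /* BCMUTILS_USAGE_SKETCH */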
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so that the last entry is the largest mW value that fits
+ * in a uint16.
+ */
+
+#define QDBM_OFFSET 153     /* Offset for first entry */
+#define QDBM_TABLE_LEN 40   /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm:    +0  +1  +2  +3  +4  +5  +6  +7 */
+/* 153: */      6683,   7079,   7499,   7943,   8414,   8913,   9441,   10000,
+/* 161: */      10593,  11220,  11885,  12589,  13335,  14125,  14962,  15849,
+/* 169: */      16788,  17783,  18836,  19953,  21135,  22387,  23714,  25119,
+/* 177: */      26607,  28184,  29854,  31623,  33497,  35481,  37584,  39811,
+/* 185: */      42170,  44668,  47315,  50119,  53088,  56234,  59566,  63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+	uint factor = 1;
+	int idx = qdbm - QDBM_OFFSET;
+
+	if (idx >= QDBM_TABLE_LEN) {
+		/* clamp to max uint16 mW value */
+		return 0xFFFF;
+	}
+
+	/* scale the qdBm index up into the range of the table (0..QDBM_TABLE_LEN-1),
+	 * where an offset of 40 qdBm corresponds to a factor of 10 in mW.
+	 */
+	while (idx < 0) {
+		idx += 40;
+		factor *= 10;
+	}
+
+	/* return the mW value scaled down to the correct factor of 10,
+	 * adding in factor/2 to get proper rounding.
+	 */
+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+	uint8 qdbm;
+	int offset;
+	uint mw_uint = mw;
+	uint boundary;
+
+	/* handle boundary case */
+	if (mw_uint <= 1)
+		return 0;
+
+	offset = QDBM_OFFSET;
+
+	/* move mw into the range of the table */
+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+		mw_uint *= 10;
+		offset -= 40;
+	}
+
+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+			nqdBm_to_mW_map[qdbm])/2;
+		if (mw_uint < boundary)
+			break;
+	}
+
+	qdbm += (uint8)offset;
+
+	return (qdbm);
+}
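+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: quarter-dBm / mW round
+ * trip. Worked from the table above: 80 qdBm (20 dBm) scales onto the 10000
+ * entry with a factor of 100, i.e. 100 mW, and converts back to 80 qdBm.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static bool
+qdbm_roundtrip_sketch(void)
+{
+	uint16 mw;
+	uint8 qdbm;
+
+	mw = bcm_qdbm_to_mw(80);	/* 80 qdBm == 20 dBm -> 100 mW */
+	qdbm = bcm_mw_to_qdbm(mw);	/* back to 80 qdBm */
+
+	return (mw == 100 && qdbm == 80);
+}
+#endif /* BCMUTILS_USAGE_SKETCH */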
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+	uint bitcount = 0, i;
+	uint8 tmp;
+	for (i = 0; i < length; i++) {
+		tmp = bitmap[i];
+		while (tmp) {
+			bitcount++;
+			tmp &= (tmp - 1);
+		}
+	}
+	return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+	b->origsize = b->size = size;
+	b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* A non-C99-compliant vsnprintf returns -1 on truncation,
+	 * a C99-compliant one returns r >= b->size,
+	 * and bcmstdlib returns 0; handle all three.
+	 */
+	if ((r == -1) || (r >= (int)b->size) || (r == 0)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+	va_end(ap);
+
+	return r;
+}
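+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: building up a dump string
+ * with bcm_binit()/bcm_bprintf(). Once the buffer is exhausted b.size drops to
+ * 0 and further prints write nothing instead of overflowing.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static uint
+bprintf_usage_sketch(char *out, uint out_len)
+{
+	struct bcmstrbuf b;
+	int i;
+
+	bcm_binit(&b, out, out_len);
+
+	for (i = 0; i < 4; i++)
+		bcm_bprintf(&b, "counter[%d]=%d\n", i, i * i);
+
+	return (uint)(b.origsize - b.size);	/* formatted characters consumed */
+}
+#endif /* BCMUTILS_USAGE_SKETCH */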
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(char *name, const uchar *data, int len)
+{
+	int i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN    ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif 
+
+#endif /* BCMDRIVER */
+
+/*
+ * ProcessVars: Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * Also accepts nvram files which are already in the format <var1>=<value1>\0<var2>=<value2>\0
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs.  End of buffer is marked by two NULs.
+*/
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+	char *dp;
+	bool findNewline;
+	int column;
+	unsigned int buf_len, n;
+	unsigned int pad = 0;
+
+	dp = varbuf;
+
+	findNewline = FALSE;
+	column = 0;
+
+	for (n = 0; n < len; n++) {
+		if (varbuf[n] == '\r')
+			continue;
+		if (findNewline && varbuf[n] != '\n')
+			continue;
+		findNewline = FALSE;
+		if (varbuf[n] == '#') {
+			findNewline = TRUE;
+			continue;
+		}
+		if (varbuf[n] == '\n') {
+			if (column == 0)
+				continue;
+			*dp++ = 0;
+			column = 0;
+			continue;
+		}
+		*dp++ = varbuf[n];
+		column++;
+	}
+	buf_len = (unsigned int)(dp - varbuf);
+	if (buf_len % 4) {
+		pad = 4 - buf_len % 4;
+		if (pad && (buf_len + pad <= len)) {
+			buf_len += pad;
+		}
+	}
+
+	while (dp < varbuf + n)
+		*dp++ = 0;
+
+	return buf_len;
+}
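+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMUTILS_USAGE_SKETCH guard: process_nvram_vars()
+ * rewrites a text nvram image (comments, CRs, blank lines) in place into the
+ * packed "<var>=<value>\0...\0\0" form consumed by the driver.
+ */
+#ifdef BCMUTILS_USAGE_SKETCH
+static unsigned int
+nvram_vars_sketch(void)
+{
+	char image[] =
+		"# sample nvram text\r\n"
+		"macaddr=00:11:22:33:44:55\r\n"
+		"\r\n"
+		"boardtype=0x0598\r\n";
+
+	/* afterwards 'image' holds "macaddr=...\0boardtype=0x0598\0\0..." */
+	return process_nvram_vars(image, sizeof(image));
+}
+#endif /* BCMUTILS_USAGE_SKETCH */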
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi.c b/drivers/net/wireless/bcmdhd/bcmwifi.c
new file mode 100644
index 0000000..7072217
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi.c
@@ -0,0 +1,274 @@
+/*
+ * Misc utility routines used by kernel or app-level software.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi.c,v 1.31.8.1 2010-08-03 17:47:05 Exp $
+ */
+
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif 
+#include <bcmwifi.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> 	
+#endif
+
+
+
+
+
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+	const char *band, *bw, *sb;
+	uint channel;
+
+	band = "";
+	bw = "";
+	sb = "";
+	channel = CHSPEC_CHANNEL(chspec);
+	
+	if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
+	    (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
+		band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
+	if (CHSPEC_IS40(chspec)) {
+		if (CHSPEC_SB_UPPER(chspec)) {
+			sb = "u";
+			channel += CH_10MHZ_APART;
+		} else {
+			sb = "l";
+			channel -= CH_10MHZ_APART;
+		}
+	} else if (CHSPEC_IS10(chspec)) {
+		bw = "n";
+	}
+
+	
+	snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
+	return (buf);
+}
+
+
+chanspec_t
+wf_chspec_aton(char *a)
+{
+	char *endp = NULL;
+	uint channel, band, bw, ctl_sb;
+	char c;
+
+	channel = strtoul(a, &endp, 10);
+
+	
+	if (endp == a)
+		return 0;
+
+	if (channel > MAXCHANNEL)
+		return 0;
+
+	band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+	bw = WL_CHANSPEC_BW_20;
+	ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+
+	a = endp;
+
+	c = tolower(a[0]);
+	if (c == '\0')
+		goto done;
+
+	
+	if (c == 'a' || c == 'b') {
+		band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
+		a++;
+		c = tolower(a[0]);
+		if (c == '\0')
+			goto done;
+	}
+
+	
+	if (c == 'n') {
+		bw = WL_CHANSPEC_BW_10;
+	} else if (c == 'l') {
+		bw = WL_CHANSPEC_BW_40;
+		ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
+		
+		if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
+			channel += CH_10MHZ_APART;
+		else
+			return 0;
+	} else if (c == 'u') {
+		bw = WL_CHANSPEC_BW_40;
+		ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
+		
+		if (channel > CH_20MHZ_APART)
+			channel -= CH_10MHZ_APART;
+		else
+			return 0;
+	} else {
+		return 0;
+	}
+
+done:
+	return (channel | band | bw | ctl_sb);
+}
+
+
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+	
+	if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
+		return TRUE;
+	
+	if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
+		return TRUE;
+
+	
+	if (CHSPEC_IS20_UNCOND(chanspec)) {
+		if (!CHSPEC_SB_NONE(chanspec))
+			return TRUE;
+	} else {
+		if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec))
+			return TRUE;
+	}
+
+	return FALSE;
+}
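+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMWIFI_USAGE_SKETCH guard: parse a chanspec string,
+ * sanity check it, and format it back. "36l" means 5 GHz channel 36 as the
+ * lower 20 MHz sideband of a 40 MHz chanspec; the exact string is just an
+ * example. 'out6' must point to at least 6 bytes.
+ */
+#ifdef BCMWIFI_USAGE_SKETCH
+static bool
+chspec_roundtrip_sketch(char *out6)
+{
+	char spec[] = "36l";
+	chanspec_t chspec;
+
+	chspec = wf_chspec_aton(spec);
+	if (chspec == 0 || wf_chspec_malformed(chspec))
+		return FALSE;
+
+	wf_chspec_ntoa(chspec, out6);	/* yields "36l" again */
+	return TRUE;
+}
+#endif /* BCMWIFI_USAGE_SKETCH */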
+
+
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+	uint8 ctl_chan;
+
+	
+	if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+		return CHSPEC_CHANNEL(chspec);
+	} else {
+		
+		ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40);
+		
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+			
+			ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+		} else {
+			ASSERT(CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_LOWER);
+			
+			ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+		}
+	}
+
+	return ctl_chan;
+}
+
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+	chanspec_t ctl_chspec = 0;
+	uint8 channel;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	
+	if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+		return chspec;
+	} else {
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+			channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+		} else {
+			channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+		}
+		ctl_chspec = channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+		ctl_chspec |= CHSPEC_BAND(chspec);
+	}
+	return ctl_chspec;
+}
+
+
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+	int ch = -1;
+	uint base;
+	int offset;
+
+	
+	if (start_factor == 0) {
+		if (freq >= 2400 && freq <= 2500)
+			start_factor = WF_CHAN_FACTOR_2_4_G;
+		else if (freq >= 5000 && freq <= 6000)
+			start_factor = WF_CHAN_FACTOR_5_G;
+	}
+
+	if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+		return 14;
+
+	base = start_factor / 2;
+
+	
+	if ((freq < base) || (freq > base + 1000))
+		return -1;
+
+	offset = freq - base;
+	ch = offset / 5;
+
+	
+	if (offset != (ch * 5))
+		return -1;
+
+	
+	if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+		return -1;
+
+	return ch;
+}
+
+
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+	int freq;
+
+	if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+	    (ch > 200))
+		freq = -1;
+	else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+		freq = 2484;
+	else
+		freq = ch * 5 + start_factor / 2;
+
+	return freq;
+}
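+
+/* Illustrative usage sketch, not part of the original source and compiled out
+ * behind a hypothetical BCMWIFI_USAGE_SKETCH guard: channel <-> MHz conversion,
+ * assuming the usual WF_CHAN_FACTOR_2_4_G value of twice 2407 MHz. 2.4 GHz
+ * channel 6 sits at 2437 MHz; both routines return -1 for out-of-range input.
+ */
+#ifdef BCMWIFI_USAGE_SKETCH
+static bool
+channel_freq_sketch(void)
+{
+	int freq, ch;
+
+	freq = wf_channel2mhz(6, WF_CHAN_FACTOR_2_4_G);	/* -> 2437 */
+	ch = wf_mhz2channel(2437, 0);			/* start factor inferred -> 6 */
+
+	return (freq == 2437 && ch == 6);
+}
+#endif /* BCMWIFI_USAGE_SKETCH */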
diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h
new file mode 100644
index 0000000..bca7535
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd.h
@@ -0,0 +1,691 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h,v 1.60.4.17 2011-01-09 08:11:56 Exp $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#if defined(CHROMIUMOS_COMPAT_WIRELESS)
+#include <linux/sched.h>
+#endif
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+/* The kernel threading is sdio-specific */
+
+#define ALL_INTERFACES	0xff
+
+#include <wlioctl.h>
+
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_cmn;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+	DHD_BUS_DOWN,		/* Not ready for frame transfers */
+	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
+	DHD_BUS_DATA		/* Ready for frame transfers */
+};
+
+enum dhd_bus_wake_state {
+	WAKE_LOCK_OFF,
+	WAKE_LOCK_PRIV,
+	WAKE_LOCK_DPC,
+	WAKE_LOCK_IOCTL,
+	WAKE_LOCK_DOWNLOAD,
+	WAKE_LOCK_TMOUT,
+	WAKE_LOCK_WATCHDOG,
+	WAKE_LOCK_LINK_DOWN_TMOUT,
+	WAKE_LOCK_PNO_FIND_TMOUT,
+	WAKE_LOCK_SOFTAP_SET,
+	WAKE_LOCK_SOFTAP_STOP,
+	WAKE_LOCK_SOFTAP_START,
+	WAKE_LOCK_SOFTAP_THREAD,
+	WAKE_LOCK_MAX
+};
+enum dhd_prealloc_index {
+	DHD_PREALLOC_PROT = 0,
+	DHD_PREALLOC_RXBUF,
+	DHD_PREALLOC_DATABUF,
+	DHD_PREALLOC_OSL_BUF
+};
+
+#if defined(DHD_USE_STATIC_BUF)
+
+uint8* dhd_os_prealloc(void *osh, int section, uint size);
+void dhd_os_prefree(void *osh, void *addr, uint size);
+#define DHD_OS_PREALLOC(osh, section, size) dhd_os_prealloc(osh, section, size)
+#define DHD_OS_PREFREE(osh, addr, size) dhd_os_prefree(osh, addr, size)
+
+#else
+
+#define DHD_OS_PREALLOC(osh, section, size) MALLOC(osh, size)
+#define DHD_OS_PREFREE(osh, addr, size) MFREE(osh, addr, size)
+
+#endif /* defined(DHD_USE_STATIC_BUF) */
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+	osl_t *osh;		/* OSL handle */
+	struct dhd_bus *bus;	/* Bus module handle */
+	struct dhd_prot *prot;	/* Protocol module handle */
+	struct dhd_info  *info; /* Info module handle */
+	struct dhd_cmn	*cmn;	/* dhd_common module handle */
+
+	/* Internal dhd items */
+	bool up;		/* Driver up/down (to OS) */
+	bool txoff;		/* Transmit flow-controlled */
+	bool dongle_reset;  /* TRUE = DEVRESET put dongle into reset */
+	enum dhd_bus_state busstate;
+	uint hdrlen;		/* Total DHD header length (proto + bus) */
+	uint maxctl;		/* Max size rxctl request from proto to bus */
+	uint rxsz;		/* Rx buffer size bus module should use */
+	uint8 wme_dp;	/* wme discard priority */
+
+	/* Dongle media info */
+	bool iswl;		/* Dongle-resident driver is wl */
+	ulong drv_version;	/* Version of dongle-resident driver */
+	struct ether_addr mac;	/* MAC address obtained from dongle */
+	dngl_stats_t dstats;	/* Stats for dongle-based data */
+
+	/* Additional stats for the bus level */
+	ulong tx_packets;	/* Data packets sent to dongle */
+	ulong tx_multicast;	/* Multicast data packets sent to dongle */
+	ulong tx_errors;	/* Errors in sending data to dongle */
+	ulong tx_ctlpkts;	/* Control packets sent to dongle */
+	ulong tx_ctlerrs;	/* Errors sending control frames to dongle */
+	ulong rx_packets;	/* Packets sent up the network interface */
+	ulong rx_multicast;	/* Multicast packets sent up the network interface */
+	ulong rx_errors;	/* Errors processing rx data packets */
+	ulong rx_ctlpkts;	/* Control frames processed from dongle */
+	ulong rx_ctlerrs;	/* Errors in processing rx control frames */
+	ulong rx_dropped;	/* Packets dropped locally (no memory) */
+	ulong rx_flushed;  /* Packets flushed due to unscheduled sendup thread */
+	ulong wd_dpc_sched;   /* Number of times dhd dpc scheduled by watchdog timer */
+
+	ulong rx_readahead_cnt;	/* Number of packets where header read-ahead was used. */
+	ulong tx_realloc;	/* Number of tx packets we had to realloc for headroom */
+	ulong fc_packets;       /* Number of flow control pkts recvd */
+
+	/* Last error return */
+	int bcmerror;
+	uint tickcnt;
+
+	/* Last error from dongle */
+	int dongle_error;
+
+	/* Suspend disable flag and "in suspend" flag */
+	int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+	int in_suspend;			/* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;                 /* pno status : "1" is pno enable */
+#endif /* PNO_SUPPORT */
+	int dtim_skip;         /* dtim skip, default 0 means wake on each DTIM */
+
+	/* Pkt filter definition */
+	char * pktfilter[100];
+	int pktfilter_count;
+
+	wl_country_t dhd_cspec;		/* Current Locale info */
+	char eventmask[WL_EVENTING_MASK_LEN];
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+	struct wake_lock 	wakelock[WAKE_LOCK_MAX];
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	struct mutex 	wl_start_stop_lock; /* lock/unlock for Android start/stop */
+	struct mutex 	wl_softap_lock;		 /* lock/unlock for any SoftAP/STA settings */
+#endif 
+
+	uint16	maxdatablks;
+#ifdef PROP_TXSTATUS
+	int   wlfc_enabled;
+	void* wlfc_state;
+#endif
+	bool	dongle_isolation;
+
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+} dhd_pub_t;
+
+typedef struct dhd_cmn {
+	osl_t *osh;		/* OSL handle */
+	dhd_pub_t *dhd;
+} dhd_cmn_t;
+
+
+	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+	#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define _DHD_PM_RESUME_WAIT(a, b) do {\
+			int retry = 0; \
+			smp_mb(); \
+			while (dhd_mmc_suspend && retry++ != b) { \
+				wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+			} \
+		} 	while (0)
+	#define DHD_PM_RESUME_WAIT(a) 		_DHD_PM_RESUME_WAIT(a, 200)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a) 	_DHD_PM_RESUME_WAIT(a, ~0)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)	do { if (dhd_mmc_suspend) return a; } while (0)
+	#define DHD_PM_RESUME_RETURN		do { if (dhd_mmc_suspend) return; } while (0)
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+		uint countdown = (us) + 9999; \
+		while ((exp) && (countdown >= 10000)) { \
+			wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+			countdown -= 10000; \
+		} \
+	} while (0)
+
+	#else
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)
+	#define DHD_PM_RESUME_WAIT(a)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)
+	#define DHD_PM_RESUME_RETURN
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us)  do { \
+		uint countdown = (us) + 9; \
+		while ((exp) && (countdown >= 10)) { \
+			OSL_DELAY(10);  \
+			countdown -= 10;  \
+		} \
+	} while (0)
+
+	#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+#ifndef DHDTHREAD
+#undef	SPINWAIT_SLEEP
+#define SPINWAIT_SLEEP(a, exp, us) SPINWAIT(exp, us)
+#endif /* DHDTHREAD */
+#define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/*  Wakelock Functions */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub);
+
+static inline void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+static inline void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+static inline void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+#define DHD_OS_WAKE_LOCK(pub) 			dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub) 		dhd_os_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)		dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(pub)	dhd_os_wake_lock_timeout_enable(pub)
+
+
+/* Interface operations (register, remove) must be atomic; use this lock to prevent
+ * races between Wi-Fi on/off and the interface operation functions.
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+
+typedef struct dhd_if_event {
+	uint8 ifidx;
+	uint8 action;
+	uint8 flags;
+	uint8 bssidx;
+	uint8 is_AP;
+} dhd_if_event_t;
+
+typedef enum dhd_attach_states
+{
+	DHD_ATTACH_STATE_INIT = 0x0,
+	DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+	DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+	DHD_ATTACH_STATE_ADD_IF = 0x4,
+	DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+	DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+	DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+	DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+	DHD_ATTACH_STATE_CFG80211 = 0x80,
+	DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+	DHD_ATTACH_STATE_DONE = 0x200
+} dhd_attach_states_t;
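+
+/*
+ * Illustrative sketch (assumed usage, not taken from this header): the states
+ * above are single bits so that the attach path can OR each completed step
+ * into a progress mask and the teardown path can undo only what was set up:
+ *
+ *	uint32 dhd_state = DHD_ATTACH_STATE_INIT;
+ *	dhd_state |= DHD_ATTACH_STATE_NET_ALLOC;
+ *	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+ *	(on error or detach)
+ *	if (dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)
+ *		(release the wake locks)
+ */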
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID 	-1
+/* Value -2 means we are unsuccessful in creating both the kthread and the tasklet */
+#define DHD_PID_KT_TL_INVALID	-2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* To allow osl_attach/detach calls from os-independent modules */
+osl_t *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(osl_t *osh);
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS.  Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void * dhd_os_open_image(char * filename);
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern int  dhd_custom_get_mac_address(unsigned char *buf);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid,
+                       ushort  scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_dev_pno_reset(struct net_device *dev);
+extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local,
+                           int nssid, ushort  scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_dev_pno_enable(struct net_device *dev,  int pfn_enabled);
+extern int dhd_dev_get_pno_status(struct net_device *dev);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+
+#define DHD_UNICAST_FILTER_NUM		0
+#define DHD_BROADCAST_FILTER_NUM	1
+#define DHD_MULTICAST4_FILTER_NUM	2
+#define DHD_MULTICAST6_FILTER_NUM	3
+extern int net_os_set_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+#if defined(OOB_INTR_ONLY)
+extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
+#endif /* defined(OOB_INTR_ONLY) */
+extern void dhd_os_sdtxlock(dhd_pub_t * pub);
+extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
+
+#if defined(DHDTHREAD)
+struct task_struct;
+struct sched_param;
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+#endif /* DHDTHREAD */
+
+typedef struct {
+	uint32 limit;		/* Expiration time (usec) */
+	uint32 increment;	/* Current expiration increment (usec) */
+	uint32 elapsed;		/* Current elapsed time (usec) */
+	uint32 tick;		/* O/S tick time (usec) */
+} dhd_timeout_t;
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
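+
+/*
+ * Illustrative usage sketch (the caller and condition below are hypothetical):
+ * dhd_timeout_t provides a simple bounded poll loop, e.g. waiting up to
+ * 10000 usec for some condition to become true:
+ *
+ *	dhd_timeout_t tmo;
+ *	dhd_timeout_start(&tmo, 10000);
+ *	while (!condition_ready() && !dhd_timeout_expired(&tmo))
+ *		;
+ *	if (!condition_ready())
+ *		return BCME_ERROR;
+ */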
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern struct net_device * dhd_idx2net(struct dhd_pub *dhd_pub, int ifidx);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
+                         wl_event_msg_t *, void **data_ptr);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+                            int ifindex);
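+
+/*
+ * Illustrative sketch (assumed caller; the iovar name and values are examples
+ * only): a typical SET path packs an iovar with bcm_mkiovar() and pushes it
+ * through dhd_wl_ioctl_cmd() as a WLC_SET_VAR:
+ *
+ *	char iovbuf[32];
+ *	int roam_off = 1;
+ *	bcm_mkiovar("roam_off", (char *)&roam_off, sizeof(roam_off),
+ *	            iovbuf, sizeof(iovbuf));
+ *	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ */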
+
+extern struct dhd_cmn *dhd_common_init(osl_t *osh);
+extern void dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn);
+
+extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
+	char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx);
+extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int  dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+
+
+typedef enum cust_gpio_modes {
+	WLAN_RESET_ON,
+	WLAN_RESET_OFF,
+	WLAN_POWER_ON,
+	WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint wl_msg_level;
+#endif /* defined(DHD_DEBUG) */
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/*  Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#define DHD_IDLETIME_TICKS 1
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+/* Default KEEP_ALIVE period is 55 seconds, to prevent the AP from sending keep-alive probe frames */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR	"null_pkt"
+
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN	2048
+extern char fw_path[MOD_PARAM_PATHLEN];
+extern char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS	16
+#define DHD_DEL_IF	-0xe
+#define DHD_BAD_IF	-0xf
+
+#ifdef PROP_TXSTATUS
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+	/*
+	b[11 ] - 1 = this packet was sent in response to one time packet request,
+	do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+	b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+	b[9  ] - 1 = packet is host->firmware (transmit direction)
+	       - 0 = packet received from firmware (firmware->host)
+	b[8  ] - 1 = packet was sent due to credit_request (pspoll),
+	             packet does not count against FIFO credit.
+	       - 0 = normal transaction, packet counts against FIFO credit
+	b[7  ] - 1 = AP, 0 = STA
+	b[6:4] - AC FIFO number
+	b[3:0] - interface index
+	*/
+	uint16	if_flags;
+	/* destination MAC address for this packet so that not every
+	module needs to open the packet to find this
+	*/
+	uint8	dstn_ether[ETHER_ADDR_LEN];
+	/*
+	This 32-bit goes from host to device for every packet.
+	*/
+	uint32	htod_tag;
+	/* bus specific stuff */
+	union {
+		struct {
+			void* stuff;
+			uint32 thing1;
+			uint32 thing2;
+		} sd;
+		struct {
+			void* bus;
+			void* urb;
+		} usb;
+	} bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue)	((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag)					(((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_IFMASK		0xf
+#define DHD_PKTTAG_IFTYPE_MASK	0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT	7
+#define DHD_PKTTAG_FIFO_MASK	0x7
+#define DHD_PKTTAG_FIFO_SHIFT	4
+
+#define DHD_PKTTAG_SIGNALONLY_MASK			0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT			10
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK		0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT		11
+
+#define DHD_PKTTAG_PKTDIR_MASK			0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT			9
+
+#define DHD_PKTTAG_CREDITCHECK_MASK		0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT	8
+
+#define DHD_PKTTAG_INVALID_FIFOID 0x7
+
+#define DHD_PKTTAG_SETFIFO(tag, fifo)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+	(((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag)			((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_SETIF(tag, if)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~DHD_PKTTAG_IFMASK) | ((if) & DHD_PKTTAG_IFMASK)
+#define DHD_PKTTAG_IF(tag)	(((dhd_pkttag_t*)(tag))->if_flags & DHD_PKTTAG_IFMASK)
+
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+	(((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+	(((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_SETPKTDIR(tag, dir)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+	(((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+	(((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+	(1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea)	memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+	(dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag)	((dhd_pkttag_t*)(tag))->dstn_ether
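+
+/*
+ * Illustrative sketch (assumed usage; ifidx, fifo_id and eh are placeholders):
+ * a transmit path stamps the pkttag with the interface, FIFO, direction and
+ * credit-check bits through the accessors above before queuing the packet:
+ *
+ *	DHD_PKTTAG_SETIF(PKTTAG(pkt), ifidx);
+ *	DHD_PKTTAG_SETFIFO(PKTTAG(pkt), fifo_id);
+ *	DHD_PKTTAG_SETPKTDIR(PKTTAG(pkt), 1);
+ *	DHD_PKTTAG_SETCREDITCHECK(PKTTAG(pkt), 1);
+ *	DHD_PKTTAG_SETDSTN(PKTTAG(pkt), eh->ether_dhost);
+ */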
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+int dhd_wlfc_enable(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(struct dhd_info *, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data);
+int dhd_wlfc_event(struct dhd_info *dhd);
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do {} while (0)
+#endif
+
+#endif /* PROP_TXSTATUS */
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c
new file mode 100644
index 0000000..6b782ea
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.c
@@ -0,0 +1,335 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.c,v 1.10.4.2 2010-12-22 23:47:23 Exp $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmcdc.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/802.11.h>
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhdioctl.h>
+#include <dhd_dbg.h>
+
+#include <dhd_bta.h>
+
+
+#ifdef SEND_HCI_CMD_VIA_IOCTL
+#define BTA_HCI_CMD_MAX_LEN (HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE)
+
+/* Send HCI cmd via wl iovar HCI_cmd to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	uint8 buf[BTA_HCI_CMD_MAX_LEN + 16];
+	uint len = sizeof(buf);
+	wl_ioctl_t ioc;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE)
+		return BCME_BADLEN;
+
+	if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len)
+		return BCME_BADLEN;
+
+	len = bcm_mkiovar("HCI_cmd",
+		(char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len);
+
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = TRUE;
+
+	return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len);
+}
+#else /* !SEND_HCI_CMD_VIA_IOCTL */
+
+static void
+dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh)
+{
+	int prec;
+	struct pktq *q;
+	uint count = 0;
+
+	q = dhd_bus_txq(pub->bus);
+	if (q == NULL)
+		return;
+
+	DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh));
+
+	dhd_os_sdlock_txq(pub);
+
+	/* Walk through the txq and toss all HCI ACL data packets */
+	PKTQ_PREC_ITER(q, prec) {
+		void *head_pkt = NULL;
+
+		while (pktq_ppeek(q, prec) != head_pkt) {
+			void *pkt = pktq_pdeq(q, prec);
+			int ifidx;
+
+			PKTPULL(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+			dhd_prot_hdrpull(pub, &ifidx, pkt);
+
+			if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
+				struct ether_header *eh =
+				        (struct ether_header *)PKTDATA(pub->osh, pkt);
+
+				if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+					struct dot11_llc_snap_header *lsh =
+					        (struct dot11_llc_snap_header *)&eh[1];
+
+					if (bcmp(lsh, BT_SIG_SNAP_MPROT,
+					         DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+					    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+						amp_hci_ACL_data_t *ACL_data =
+						        (amp_hci_ACL_data_t *)&lsh[1];
+						uint16 handle = ltoh16(ACL_data->handle);
+
+						if (HCI_ACL_DATA_HANDLE(handle) == llh) {
+							PKTFREE(pub->osh, pkt, TRUE);
+							count ++;
+							continue;
+						}
+					}
+				}
+			}
+
+			dhd_prot_hdrpush(pub, ifidx, pkt);
+			PKTPUSH(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+
+			if (head_pkt == NULL)
+				head_pkt = pkt;
+			pktq_penq(q, prec, pkt);
+		}
+	}
+
+	dhd_os_sdunlock_txq(pub);
+
+	DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh));
+}
+
+/* Handle the HCI cmd locally.
+ * Return 0:   continue and send the cmd across SDIO
+ *        < 0: stop, failure
+ *        > 0: stop, success
+ */
+static int
+_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd)
+{
+	int status = 0;
+
+	switch (ltoh16_ua((uint8 *)&cmd->opcode)) {
+	case HCI_Enhanced_Flush: {
+		eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms;
+		dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh));
+		break;
+	}
+	default:
+		break;
+	}
+
+	return status;
+}
+
+/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+	int status;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) {
+		DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n",
+		           len, cmd_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_docmd: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* intercept and handle the HCI cmd locally; free the packet allocated
+	 * above if the command is not forwarded to the dongle
+	 */
+	if ((status = _dhd_bta_docmd(pub, cmd)) != 0) {
+		PKTFREE(osh, p, TRUE);
+		return (status > 0) ? 0 : status;
+	}
+
+	/* copy in HCI cmd */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(cmd, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	ETHER_SET_LOCALADDR(eh->ether_dhost);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = 0;
+
+	return dhd_sendpkt(pub, 0, p);
+}
+#endif /* !SEND_HCI_CMD_VIA_IOCTL */
+
+/* Send HCI ACL data to dongle via data channel */
+int
+dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len)
+{
+	amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+
+	if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n",
+		           len, data_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* copy in HCI ACL data header and HCI ACL data */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(data, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = HTON16(BTA_PROT_L2CAP);
+
+	return dhd_sendpkt(pub, 0, p);
+}
+
+/* txcomplete callback */
+void
+dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp);
+	amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN);
+	uint16 handle = ltoh16(ACL_data->handle);
+	uint16 llh = HCI_ACL_DATA_HANDLE(handle);
+
+	wl_event_msg_t event;
+	uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)];
+	amp_hci_event_t *evt;
+	num_completed_data_blocks_evt_parms_t *parms;
+
+	uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t);
+
+	/* update the event struct */
+	memset(&event, 0, sizeof(event));
+	event.version = hton16(BCM_EVENT_MSG_VERSION);
+	event.event_type = hton32(WLC_E_BTA_HCI_EVENT);
+	event.status = 0;
+	event.reason = 0;
+	event.auth_type = 0;
+	event.datalen = hton32(len);
+	event.flags = 0;
+
+	/* generate Number of Completed Blocks event */
+	evt = (amp_hci_event_t *)data;
+	evt->ecode = HCI_Number_of_Completed_Data_Blocks;
+	evt->plen = sizeof(num_completed_data_blocks_evt_parms_t);
+
+	parms = (num_completed_data_blocks_evt_parms_t *)evt->parms;
+	htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks);
+	parms->num_handles = 1;
+	htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle);
+	parms->completed[0].pkts = 1;
+	parms->completed[0].blocks = 1;
+
+	dhd_sendup_event_common(dhdp, &event, data);
+}
+
+/* event callback */
+void
+dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len)
+{
+	amp_hci_event_t *evt = (amp_hci_event_t *)data_buf;
+
+	switch (evt->ecode) {
+	case HCI_Command_Complete: {
+		cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms;
+		switch (ltoh16_ua((uint8 *)&parms->opcode)) {
+		case HCI_Read_Data_Block_Size: {
+			read_data_block_size_evt_parms_t *parms2 =
+			        (read_data_block_size_evt_parms_t *)parms->parms;
+			dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num);
+			break;
+		}
+		}
+		break;
+	}
+
+	case HCI_Flush_Occurred: {
+		flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms;
+		dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle));
+		break;
+	}
+	default:
+		break;
+	}
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h
new file mode 100644
index 0000000..07d9ceb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.h
@@ -0,0 +1,39 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.h,v 1.2 2009-02-26 22:35:56 Exp $
+ */
+#ifndef __dhd_bta_h__
+#define __dhd_bta_h__
+
+struct dhd_pub;
+
+extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len);
+
+extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len);
+
+extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len);
+extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success);
+
+
+#endif /* __dhd_bta_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h
new file mode 100644
index 0000000..bae0713
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -0,0 +1,93 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h,v 1.14.28.1 2010-12-23 01:13:17 Exp $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+	char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* defined(DHD_DEBUG) */
+
+/* Deferred processing for the bus; returning TRUE requests a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                            void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c
new file mode 100644
index 0000000..91c461f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -0,0 +1,2318 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c,v 1.51.6.31 2011-02-09 14:31:43 Exp $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN	(16+DHD_SDALIGN)	/* Must be at least SDPCM_RESERVE
+				 * defined in dhd_sdio.c (amount of header that might be added)
+				 * plus any space that might be needed for alignment padding.
+				 */
+#define ROUND_UP_MARGIN	2048 	/* Largest possible SDIO block size, used for
+				 * rounding up at the end of the buffer
+				 */
+
+typedef struct dhd_prot {
+	uint16 reqid;
+	uint8 pending;
+	uint32 lastcmd;
+	uint8 bus_header[BUS_HEADER_LEN];
+	cdc_ioctl_t msg;
+	unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+	int err = 0;
+	dhd_prot_t *prot = dhd->prot;
+	int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* NOTE: prot->msg.len holds the desired length of the buffer to be
+	 *       returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+	 *       is actually sent to the dongle.
+	 */
+	if (len > CDC_MAX_MSG_SIZE)
+		len = CDC_MAX_MSG_SIZE;
+
+	/* Send request */
+	err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+	int ret;
+	int cdc_len = len+sizeof(cdc_ioctl_t);
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	do {
+		ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+		if (ret < 0)
+			break;
+	} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+	return ret;
+}
+
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	void *info;
+	int ret = 0, retries = 0;
+	uint32 id, flags = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if ((id < prot->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check info buffer */
+	info = (void*)&msg[1];
+
+	/* Copy info buffer */
+	if (buf)
+	{
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, info, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0;
+	uint32 flags, id;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		goto done;
+	}
+
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+	if (action & WL_IOCTL_ACTION_SET)
+		ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	else {
+		ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret - sizeof(cdc_ioctl_t);
+	}
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		cdc_ioctl_t *msg = &prot->msg;
+		ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+	prot->pending = FALSE;
+
+done:
+	return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                  void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+#ifdef PROP_TXSTATUS
+void
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	int i;
+	uint8* ea;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhdp->wlfc_state;
+	wlfc_hanger_t* h;
+	wlfc_mac_descriptor_t* mac_table;
+	wlfc_mac_descriptor_t* interfaces;
+	char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+	if (wlfc == NULL) {
+		bcm_bprintf(strbuf, "wlfc not initialized yet\n");
+		return;
+	}
+	h = (wlfc_hanger_t*)wlfc->hanger;
+	if (h == NULL) {
+		bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+	}
+
+	mac_table = wlfc->destination_entries.nodes;
+	interfaces = wlfc->destination_entries.interfaces;
+	bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+	if (h) {
+		bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+			"f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+			h->pushed,
+			h->popped,
+			h->failed_to_push,
+			h->failed_to_pop,
+			h->failed_slotfind,
+			(h->pushed - h->popped));
+	}
+
+	bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+		"(dq_full,sendq_full, rollback_fail) = (%d,%d,%d,%d), (%d,%d,%d)\n",
+		wlfc->stats.tlv_parse_failed,
+		wlfc->stats.credit_request_failed,
+		wlfc->stats.mac_update_failed,
+		wlfc->stats.psmode_update_failed,
+		wlfc->stats.delayq_full_error,
+		wlfc->stats.sendq_full_error,
+		wlfc->stats.rollback_failed);
+
+	bcm_bprintf(strbuf, "SENDQ (len,credit,sent) "
+		"(AC0[%d,%d,%d],AC1[%d,%d,%d],AC2[%d,%d,%d],AC3[%d,%d,%d],BC_MC[%d,%d,%d])\n",
+		wlfc->SENDQ.q[0].len, wlfc->FIFO_credit[0], wlfc->stats.sendq_pkts[0],
+		wlfc->SENDQ.q[1].len, wlfc->FIFO_credit[1], wlfc->stats.sendq_pkts[1],
+		wlfc->SENDQ.q[2].len, wlfc->FIFO_credit[2], wlfc->stats.sendq_pkts[2],
+		wlfc->SENDQ.q[3].len, wlfc->FIFO_credit[3], wlfc->stats.sendq_pkts[3],
+		wlfc->SENDQ.q[4].len, wlfc->FIFO_credit[4], wlfc->stats.sendq_pkts[4]);
+
+#ifdef PROP_TXSTATUS_DEBUG
+	bcm_bprintf(strbuf, "SENDQ dropped: AC[0-3]:(%d,%d,%d,%d), (bcmc,atim):(%d,%d)\n",
+		wlfc->stats.dropped_qfull[0], wlfc->stats.dropped_qfull[1],
+		wlfc->stats.dropped_qfull[2], wlfc->stats.dropped_qfull[3],
+		wlfc->stats.dropped_qfull[4], wlfc->stats.dropped_qfull[5]);
+#endif
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		if (interfaces[i].occupied) {
+			char* iftype_desc;
+
+			if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown";
+			else
+				iftype_desc = iftypes[interfaces[i].iftype];
+
+			ea = interfaces[i].ea;
+			bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				interfaces[i].interface_id,
+				iftype_desc);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ(len,state,credit)"
+				"= (%d,%s,%d)\n",
+				i,
+				interfaces[i].psq.len,
+				((interfaces[i].state ==
+				WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+				interfaces[i].requested_credit);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ"
+				"(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+				"(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+				i,
+				interfaces[i].psq.q[0].len,
+				interfaces[i].psq.q[1].len,
+				interfaces[i].psq.q[2].len,
+				interfaces[i].psq.q[3].len,
+				interfaces[i].psq.q[4].len,
+				interfaces[i].psq.q[5].len,
+				interfaces[i].psq.q[6].len,
+				interfaces[i].psq.q[7].len);
+		}
+	}
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (mac_table[i].occupied) {
+			ea = mac_table[i].ea;
+			bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				mac_table[i].interface_id);
+
+			bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ(len,state,credit)"
+				"= (%d,%s,%d)\n",
+				i,
+				mac_table[i].psq.len,
+				((mac_table[i].state ==
+				WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+				mac_table[i].requested_credit);
+#ifdef PROP_TXSTATUS_DEBUG
+			bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+				i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+			bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ"
+				"(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+				"(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+				i,
+				mac_table[i].psq.q[0].len,
+				mac_table[i].psq.q[1].len,
+				mac_table[i].psq.q[2].len,
+				mac_table[i].psq.q[3].len,
+				mac_table[i].psq.q[4].len,
+				mac_table[i].psq.q[5].len,
+				mac_table[i].psq.q[6].len,
+				mac_table[i].psq.q[7].len);
+		}
+	}
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		int avg;
+		int moving_avg = 0;
+		int moving_samples;
+
+		if (wlfc->stats.latency_sample_count) {
+			moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+			for (i = 0; i < moving_samples; i++)
+				moving_avg += wlfc->stats.deltas[i];
+			moving_avg /= moving_samples;
+
+			avg = (100 * wlfc->stats.total_status_latency) /
+				wlfc->stats.latency_sample_count;
+			bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+				"(%d.%d, %03d, %03d)\n",
+				moving_samples, avg/100, (avg - (avg/100)*100),
+				wlfc->stats.latency_most_recent,
+				moving_avg);
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+		"back = (%d,%d,%d,%d,%d,%d)\n",
+		wlfc->stats.fifo_credits_sent[0],
+		wlfc->stats.fifo_credits_sent[1],
+		wlfc->stats.fifo_credits_sent[2],
+		wlfc->stats.fifo_credits_sent[3],
+		wlfc->stats.fifo_credits_sent[4],
+		wlfc->stats.fifo_credits_sent[5],
+
+		wlfc->stats.fifo_credits_back[0],
+		wlfc->stats.fifo_credits_back[1],
+		wlfc->stats.fifo_credits_back[2],
+		wlfc->stats.fifo_credits_back[3],
+		wlfc->stats.fifo_credits_back[4],
+		wlfc->stats.fifo_credits_back[5]);
+	{
+		uint32 fifo_cr_sent = 0;
+		uint32 fifo_cr_acked = 0;
+		uint32 request_cr_sent = 0;
+		uint32 request_cr_ack = 0;
+		uint32 bc_mc_cr_ack = 0;
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+			fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+		}
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+			fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+		}
+
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_sent +=
+					wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_sent +=
+				wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.nodes[i].dstncredit_acks;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.interfaces[i].dstncredit_acks;
+			}
+		}
+		bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+			"other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+			fifo_cr_sent, fifo_cr_acked,
+			request_cr_sent, request_cr_ack,
+			wlfc->destination_entries.other.dstncredit_acks,
+			bc_mc_cr_ack,
+			wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+	}
+#endif /* PROP_TXSTATUS_DEBUG */
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull),(dropped,hdr_only,wlc_tossed)"
+		"(freed,free_err,rollback)) = "
+		"((%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.pktin,
+		wlfc->stats.pkt2bus,
+		wlfc->stats.txstatus_in,
+		wlfc->stats.dhd_hdrpulls,
+
+		wlfc->stats.pktdropped,
+		wlfc->stats.wlfc_header_only_pkt,
+		wlfc->stats.wlc_tossed_pkts,
+
+		wlfc->stats.pkt_freed,
+		wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+	bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+		"((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+
+		wlfc->stats.d11_suppress,
+		wlfc->stats.wl_suppress,
+		wlfc->stats.bad_suppress,
+
+		wlfc->stats.psq_d11sup_enq,
+		wlfc->stats.psq_wlsup_enq,
+		wlfc->stats.psq_hostq_enq,
+		wlfc->stats.mac_handle_notfound,
+
+		wlfc->stats.psq_d11sup_retx,
+		wlfc->stats.psq_wlsup_retx,
+		wlfc->stats.psq_hostq_retx);
+	return;
+}
+
+/* Create a place to store all packet pointers submitted to the firmware until
+ * a status comes back for each of them, suppressed or otherwise.
+ *
+ * hang-er: noun, a contrivance on which things are hung, as a hook.
+ */
+static void*
+dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+{
+	int i;
+	wlfc_hanger_t* hanger;
+
+	/* allow only up to a specific size for now */
+	ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+	if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+		return NULL;
+
+	memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+	hanger->max_items = max_items;
+
+	for (i = 0; i < hanger->max_items; i++) {
+		hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+	}
+	return hanger;
+}
+
+static int
+dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+		return BCME_OK;
+	}
+	return BCME_BADARG;
+}
+
+static uint16
+dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+	int i;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		for (i = 0; i < h->max_items; i++) {
+			if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE)
+				return (uint16)i;
+		}
+		/* only count the failure when a valid hanger was searched */
+		h->failed_slotfind++;
+	}
+	return WLFC_HANGER_MAXITEMS;
+}
+
+static int
+dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+			h->items[slot_id].pkt = pkt;
+			h->items[slot_id].identifier = slot_id;
+			h->pushed++;
+		}
+		else {
+			h->failed_to_push++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, int remove_from_hanger)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+			*pktout = h->items[slot_id].pkt;
+			if (remove_from_hanger) {
+				h->items[slot_id].state =
+					WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[slot_id].pkt = NULL;
+				h->items[slot_id].identifier = 0;
+				h->popped++;
+			}
+		}
+		else {
+			h->failed_to_pop++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
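+
+/*
+ * Illustrative sketch (assumed flow): the hanger acts as a slot table keyed
+ * by the host-to-device tag. A transmit claims a free slot and parks the
+ * packet; when the matching txstatus arrives, the packet is retrieved by
+ * slot id and the slot is released:
+ *
+ *	slot = dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+ *	dhd_wlfc_hanger_pushpkt(ctx->hanger, p, slot);
+ *	(later, when the txstatus for this slot arrives)
+ *	dhd_wlfc_hanger_poppkt(ctx->hanger, slot, &p, 1);
+ */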
+
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+	uint8 tim_bmp, uint8 mac_handle, uint32 htodtag)
+{
+	uint32 wl_pktinfo = 0;
+	uint8* wlh;
+	uint8 dataOffset;
+	uint8 fillers;
+	uint8 tim_signal_len = 0;
+
+	struct bdc_header *h;
+
+	if (tim_signal) {
+		tim_signal_len = 1 + 1 + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+	}
+
+	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+	dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + 2 + tim_signal_len;
+	fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+	dataOffset += fillers;
+
+	PKTPUSH(ctx->osh, p, dataOffset);
+	wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+	wl_pktinfo = htol32(htodtag);
+
+	wlh[0] = WLFC_CTL_TYPE_PKTTAG;
+	wlh[1] = WLFC_CTL_VALUE_LEN_PKTTAG;
+	memcpy(&wlh[2], &wl_pktinfo, sizeof(uint32));
+
+	if (tim_signal_len) {
+		wlh[dataOffset - fillers - tim_signal_len ] =
+			WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 1] =
+			WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+		wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+	}
+	if (fillers)
+		memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+	PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+	h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(p))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = dataOffset >> 2;
+	BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+	struct bdc_header *h;
+
+	if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+		WLFC_DBGMESG(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+	h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+	/* pull BDC header */
+	PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+	/* pull wl-header */
+	PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+	return BCME_OK;
+}
+
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+	int i;
+	wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+	uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+	uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+
+	/* no per-node lookup needed: use the interface entry directly for STA or
+	 * P2P-client interfaces, or for multicast destinations
+	 */
+	if (((ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_STA) ||
+		ETHER_ISMULTI(dstn) ||
+		(ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_P2P_CLIENT)) &&
+		(ctx->destination_entries.interfaces[ifid].occupied)) {
+			return &ctx->destination_entries.interfaces[ifid];
+	}
+
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (table[i].occupied) {
+			if (table[i].interface_id == ifid) {
+				if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN))
+					return &table[i];
+			}
+		}
+	}
+	return &ctx->destination_entries.other;
+}
+
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+	void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+	/*
+	Put the packet back at the head of its queue:
+
+	- a packet from the send-q must go back to the send-q, not the delay-q,
+	  since moving it would change the packet order.
+	- a suppressed packet goes back to the suppress sub-queue.
+	- the header is pulled out again for new or delayed packets.
+
+	Note: hslot is used only when header removal is done.
+	*/
+	wlfc_mac_descriptor_t* entry;
+	void* pktout;
+	int rc = BCME_OK;
+	int prec;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+	if (entry != NULL) {
+		if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) {
+			/* wl-header is saved for suppressed packets */
+			if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+				WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+				rc = BCME_ERROR;
+			}
+		}
+		else {
+			/* remove header first */
+			_dhd_wlfc_pullheader(ctx, p);
+
+			if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+				/* delay-q packets are going to delay-q */
+				if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, (prec << 1), p) == NULL) {
+					WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+					rc = BCME_ERROR;
+				}
+			}
+			else {
+				/* these are going to SENDQ */
+				if (WLFC_PKTQ_PENQ_HEAD(&ctx->SENDQ, prec, p) == NULL) {
+					WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+					rc = BCME_ERROR;
+				}
+			}
+			/* free the hanger slot */
+			dhd_wlfc_hanger_poppkt(ctx->hanger, hslot, &pktout, 1);
+
+			/* decrement sequence count */
+			WLFC_DECR_SEQCOUNT(entry, prec);
+		}
+		/*
+		if this packet did not count against FIFO credit, it must have
+		taken a requested_credit from the firmware (for pspoll etc.)
+		*/
+		if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+			entry->requested_credit++;
+		}
+	}
+	else {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		rc = BCME_ERROR;
+	}
+	if (rc != BCME_OK)
+		ctx->stats.rollback_failed++;
+	else
+		ctx->stats.rollback++;
+
+	return rc;
+}
+
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
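+	/* hostif_flow_state ON means host->dongle traffic is currently throttled for if_id */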
+	if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+		/* start traffic */
+		ctx->hostif_flow_state[if_id] = OFF;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("F"));
+		/* dhd_txflowcontrol(ctx->dhdp, if_id, OFF); */
+		ctx->toggle_host_if = 0;
+	}
+	if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+		/* stop traffic */
+		ctx->hostif_flow_state[if_id] = ON;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic   %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("N"));
+		/* dhd_txflowcontrol(ctx->dhdp, if_id, ON); */
+		ctx->host_ifidx = if_id;
+		ctx->toggle_host_if = 1;
+	}
+	return;
+}
+
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 ta_bmp)
+{
+	int rc = BCME_OK;
+	void* p = NULL;
+	int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen + 12;
+
+	/* allocate a dummy packet */
+	p = PKTGET(ctx->osh, dummylen, TRUE);
+	if (p) {
+		PKTPULL(ctx->osh, p, dummylen);
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+		_dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0);
+		DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+		ctx->stats.signal_only_pkts_sent++;
+#endif
+		dhd_bus_txdata(((dhd_pub_t *)ctx->dhdp)->bus, p);
+	}
+	else {
+		DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+		           __FUNCTION__, dummylen));
+		rc = BCME_NOMEM;
+	}
+	return rc;
+}
+
+/* Return TRUE if traffic availability changed */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	int prec)
+{
+	bool rc = FALSE;
+
+	if (entry->state == WLFC_STATE_CLOSE) {
+		if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+			(pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+
+			if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp & ~ NBITVAL(prec);
+			}
+		}
+		else {
+			if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp | NBITVAL(prec);
+			}
+		}
+	}
+	if (rc) {
+		/* request a TIM update to firmware at the next piggyback opportunity */
+		if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+			entry->send_tim_signal = 1;
+			_dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+			entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+			entry->send_tim_signal = 0;
+		}
+		else {
+			rc = FALSE;
+		}
+	}
+	return rc;
+}
+
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	if (entry == NULL) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_NOTFOUND;
+	}
+	/*
+	- suppressed packets go to sub_queue[2*prec + 1] AND
+	- delayed packets go to sub_queue[2*prec + 0] to ensure
+	order of delivery.
+	*/
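+	/* e.g. for prec 2, suppressed frames use sub-queue 5 and delayed frames sub-queue 4 */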
+	if (WLFC_PKTQ_PENQ(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+		ctx->stats.delayq_full_error++;
+		/* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+		WLFC_DBGMESG(("s"));
+		return BCME_ERROR;
+	}
+	/* A packet has been pushed, update traffic availability bitmap, if applicable */
+	_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+{
+	int rc = BCME_OK;
+	int hslot = WLFC_HANGER_MAXITEMS;
+	bool send_tim_update = FALSE;
+	uint32 htod = 0;
+	uint8 free_ctr;
+
+	*slot = hslot;
+
+	if (entry == NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, p);
+	}
+
+	if (entry == NULL) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_ERROR;
+	}
+	if (entry->send_tim_signal) {
+		send_tim_update = TRUE;
+		entry->send_tim_signal = 0;
+		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+	}
+	if (header_needed) {
+		hslot = dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+		free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+	}
+	else {
+		hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	}
+	WLFC_PKTID_HSLOT_SET(htod, hslot);
+	WLFC_PKTID_FREERUNCTR_SET(htod, free_ctr);
+	DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+	WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+	WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	WLFC_PKTFLAG_SET_GENERATION(htod, entry->generation);
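+	/*
+	htod now packs the hanger slot, free-running sequence counter, host/FIFO
+	flags and generation; the hanger slot and generation are recovered from
+	the txstatus the firmware returns for this packet.
+	*/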
+
+	if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+		/*
+		Indicate that this packet is being sent in response to an
+		explicit request from the firmware side.
+		*/
+		WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+	}
+	else {
+		WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+	}
+	if (header_needed) {
+		rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+			entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+		if (rc == BCME_OK) {
+			DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+			/*
+			a new header was created for this packet.
+			push to hanger slot and scrub q. Since bus
+			send succeeded, increment seq number as well.
+			*/
+			rc = dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+			if (rc == BCME_OK) {
+				/* increment free running sequence count */
+				WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+#ifdef PROP_TXSTATUS_DEBUG
+				((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
+					OSL_SYSUPTIME();
+#endif
+			}
+			else {
+				WLFC_DBGMESG(("%s() hanger_pushpkt() failed, rc: %d\n",
+					__FUNCTION__, rc));
+			}
+		}
+	}
+	else {
+		/* remove old header */
+		_dhd_wlfc_pullheader(ctx, p);
+
+		hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		/* push new header */
+		_dhd_wlfc_pushheader(ctx, p, send_tim_update,
+			entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+	}
+	*slot = hslot;
+	return rc;
+}
+
+static int
+_dhd_wlfc_is_destination_closed(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, int prec)
+{
+	if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
+		WLC_E_IF_ROLE_P2P_GO) {
+		/* - destination interface is of type p2p GO.
+		For a p2p GO interface, if the destination is OPEN but the interface is
+		CLOSEd, do not send traffic. But if the destination is CLOSEd while there
+		is destination-specific credit left, send packets, because the firmware
+		is holding the destination-specific-requested packets in its queue.
+		*/
+		if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+			(entry->requested_packet == 0))
+			return 1;
+	}
+	/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+	if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+		(entry->requested_packet == 0)) ||
+		(!(entry->ac_bitmap & (1 << prec))))
+		return 1;
+
+	return 0;
+}
+
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx,
+	int prec, uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out)
+{
+	wlfc_mac_descriptor_t* entry;
+	wlfc_mac_descriptor_t* table;
+	uint8 token_pos;
+	int total_entries;
+	void* p = NULL;
+	int pout;
+	int i;
+
+	*entry_out = NULL;
+	token_pos = ctx->token_pos[prec];
+	/* in most cases a packet will count against FIFO credit */
+	*ac_credit_spent = 1;
+	*needs_hdr = 1;
+
+	/* search all entries, include nodes as well as interfaces */
+	table = (wlfc_mac_descriptor_t*)&ctx->destination_entries;
+	total_entries = sizeof(ctx->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+
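+	/* start the scan at the saved token position so destinations get round-robin service */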
+	for (i = 0; i < total_entries; i++) {
+		entry = &table[(token_pos + i) % total_entries];
+		if (entry->occupied) {
+			if (!_dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
+				p = pktq_mdeq(&entry->psq,
+					/* higher precedence will be picked up first,
+					i.e. suppressed packets before delayed ones
+					*/
+					(NBITVAL((prec << 1) + 1) | NBITVAL((prec << 1))),
+					&pout);
+				if (p != NULL) {
+					/* did the packet come from suppress sub-queue? */
+					if (pout == ((prec << 1) + 1)) {
+						/*
+						this packet was suppressed and was sent on the bus
+						previously; this already has a header
+						*/
+						*needs_hdr = 0;
+					}
+					if (entry->requested_credit > 0) {
+						entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+						entry->dstncredit_sent_packets++;
+#endif
+						/*
+						if the packet was pulled out while destination is in
+						closed state but had a non-zero packets requested,
+						then this should not count against the FIFO credit.
+						That is due to the fact that the firmware will
+						most likely hold onto this packet until a suitable
+						time later to push it to the appropriate AC FIFO.
+						*/
+						if (entry->state == WLFC_STATE_CLOSE)
+							*ac_credit_spent = 0;
+					}
+					else if (entry->requested_packet > 0) {
+						entry->requested_packet--;
+						DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+						if (entry->state == WLFC_STATE_CLOSE)
+							*ac_credit_spent = 0;
+					}
+					/* move token to ensure fair round-robin */
+					ctx->token_pos[prec] =
+						(token_pos + i + 1) % total_entries;
+					*entry_out = entry;
+					_dhd_wlfc_flow_control_check(ctx, &entry->psq,
+						DHD_PKTTAG_IF(PKTTAG(p)));
+					/*
+					A packet has been picked up, update traffic
+					availability bitmap, if applicable
+					*/
+					_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+					return p;
+				}
+			}
+		}
+	}
+	return NULL;
+}
+
+static void*
+_dhd_wlfc_deque_sendq(athost_wl_status_info_t* ctx, int prec, uint8* ac_credit_spent)
+{
+	wlfc_mac_descriptor_t* entry;
+	void* p;
+
+	/* in most cases a packet will count against FIFO credit */
+	*ac_credit_spent = 1;
+
+	p = pktq_pdeq(&ctx->SENDQ, prec);
+	if (p != NULL) {
+		if (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))
+			/* bc/mc packets do not have a delay queue */
+			return p;
+
+		entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+		if (entry == NULL) {
+			WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return p;
+		}
+
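+		/* park packets for closed destinations in their delay queue until they reopen */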
+		while ((p != NULL) && _dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
+			/*
+			- suppressed packets go to sub_queue[2*prec + 1] AND
+			- delayed packets go to sub_queue[2*prec + 0] to ensure
+			order of delivery.
+			*/
+			if (WLFC_PKTQ_PENQ(&entry->psq, (prec << 1), p) == NULL) {
+				WLFC_DBGMESG(("D"));
+				/* dhd_txcomplete(ctx->dhdp, p, FALSE); */
+				PKTFREE(ctx->osh, p, TRUE);
+				ctx->stats.delayq_full_error++;
+			}
+			/*
+			A packet has been pushed, update traffic availability bitmap,
+			if applicable
+			*/
+			_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+			_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+			p = pktq_pdeq(&ctx->SENDQ, prec);
+			if (p == NULL)
+				break;
+
+			entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+			if ((entry == NULL) || (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))) {
+				return p;
+			}
+		}
+		if (p) {
+			if (entry->requested_packet == 0) {
+				if (entry->requested_credit > 0)
+					entry->requested_credit--;
+			}
+			else {
+				entry->requested_packet--;
+				DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+			}
+			if (entry->state == WLFC_STATE_CLOSE)
+				*ac_credit_spent = 0;
+#ifdef PROP_TXSTATUS_DEBUG
+			entry->dstncredit_sent_packets++;
+#endif
+		}
+		if (p)
+			_dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p)));
+	}
+	return p;
+}
+
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	int rc = BCME_OK;
+
+	if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+		entry->occupied = 1;
+		entry->state = WLFC_STATE_OPEN;
+		entry->requested_credit = 0;
+		entry->interface_id = ifid;
+		entry->iftype = iftype;
+		entry->ac_bitmap = 0xff; /* update this when handling APSD */
+		/* for an interface entry we may not care about the MAC address */
+		if (ea != NULL)
+			memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+		pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+	}
+	else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+		entry->occupied = 0;
+		entry->state = WLFC_STATE_CLOSE;
+		entry->requested_credit = 0;
+		/* enable after packets are queued/dequeued properly.
+		pktq_flush(dhd->osh, &entry->psq, FALSE, NULL, 0);
+		*/
+	}
+	return rc;
+}
+
+int
+dhd_wlfc_interface_entry_update(void* state,
+	ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	wlfc_mac_descriptor_t* entry;
+
+	if (ifid >= WLFC_MAX_IFNUM)
+		return BCME_BADARG;
+
+	entry = &ctx->destination_entries.interfaces[ifid];
+	return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea);
+}
+
+int
+dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+	/* update the AC FIFO credit map */
+	ctx->FIFO_credit[0] = credits[0];
+	ctx->FIFO_credit[1] = credits[1];
+	ctx->FIFO_credit[2] = credits[2];
+	ctx->FIFO_credit[3] = credits[3];
+	/* credit for bc/mc packets */
+	ctx->FIFO_credit[4] = credits[4];
+	/* credit for ATIM FIFO is not used yet. */
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_enque_sendq(void* state, int prec, void* p)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+	if ((state == NULL) ||
+		/* prec = AC_COUNT is used for bc/mc queue */
+		(prec > AC_COUNT) ||
+		(p == NULL)) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+	if (FALSE == dhd_prec_enq(ctx->dhdp, &ctx->SENDQ, p, prec)) {
+		ctx->stats.sendq_full_error++;
+		/*
+		WLFC_DBGMESG(("Error: %s():%d, qlen:%d\n",
+		__FUNCTION__, __LINE__, ctx->SENDQ.len));
+		*/
+		WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, prec);
+		WLFC_DBGMESG(("Q"));
+		PKTFREE(ctx->osh, p, TRUE);
+		return BCME_ERROR;
+	}
+	ctx->stats.pktin++;
+	/* _dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p))); */
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx)
+{
+	int ac;
+	int credit;
+	uint8 ac_fifo_credit_spent;
+	uint8 needs_hdr;
+	uint32 hslot;
+	void* p;
+	int rc;
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	wlfc_mac_descriptor_t* mac_entry;
+
+	if ((state == NULL) ||
+		(fcommit == NULL)) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	/* 
+	Commit packets for regular AC traffic. Higher priority first.
+
+	-NOTE:
+	If the bus between the host and firmware is overwhelmed by the
+	traffic from host, it is possible that higher priority traffic
+	starves the lower priority queue. If that occurs often, we may
+	have to employ weighted round-robin or ucode scheme to avoid
+	low priority packet starvation.
+	*/
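+	/*
+	Two passes per AC: first drain the per-destination delay/suppress queues,
+	then pull fresh packets from the common SENDQ.
+	*/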
+	for (ac = AC_COUNT; ac >= 0; ac--) {
+		for (credit = 0; credit < ctx->FIFO_credit[ac];) {
+			p = _dhd_wlfc_deque_delayedq(ctx, ac, &ac_fifo_credit_spent, &needs_hdr,
+				&mac_entry);
+			if (p == NULL)
+				break;
+			/*
+			if ac_fifo_credit_spent = 0
+
+			This packet will not count against the FIFO credit.
+			To ensure the txstatus corresponding to this packet
+			does not provide an implied credit (default behavior)
+			mark the packet accordingly.
+
+			if ac_fifo_credit_spent = 1
+
+			This is a normal packet and it counts against the FIFO
+			credit count.
+			*/
+			DHD_PKTTAG_SETCREDITCHECK(PKTTAG(p), ac_fifo_credit_spent);
+			rc = _dhd_wlfc_pretx_pktprocess(ctx, mac_entry, p, needs_hdr, &hslot);
+
+			if (rc == BCME_OK)
+				rc = fcommit(commit_ctx, p);
+			else
+				ctx->stats.generic_error++;
+
+			if (rc == BCME_OK) {
+				ctx->stats.pkt2bus++;
+				if (ac_fifo_credit_spent) {
+					ctx->stats.sendq_pkts[ac]++;
+					WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+					/*
+					1 FIFO credit has been spent by sending this packet
+					to the device.
+					*/
+					credit++;
+				}
+			}
+			else {
+				/* bus commit has failed, rollback. */
+				rc = _dhd_wlfc_rollback_packet_toq(ctx,
+					p,
+					/*
+					- remove wl-header for a delayed packet
+					- save the wl-header for suppressed packets
+					*/
+					(needs_hdr ? eWLFC_PKTTYPE_DELAYED :
+					eWLFC_PKTTYPE_SUPPRESSED),
+					hslot);
+				if (rc != BCME_OK)
+					ctx->stats.rollback_failed++;
+			}
+		}
+		ctx->FIFO_credit[ac] -= credit;
+		/* packets from SENDQ are fresh and need a header */
+		needs_hdr = 1;
+		for (credit = 0; credit < ctx->FIFO_credit[ac];) {
+			p = _dhd_wlfc_deque_sendq(ctx, ac, &ac_fifo_credit_spent);
+			if (p == NULL)
+				break;
+
+			DHD_PKTTAG_SETCREDITCHECK(PKTTAG(p), ac_fifo_credit_spent);
+			rc = _dhd_wlfc_pretx_pktprocess(ctx, NULL, p, needs_hdr, &hslot);
+			if (rc == BCME_OK)
+				rc = fcommit(commit_ctx, p);
+			else
+				ctx->stats.generic_error++;
+
+			if (rc == BCME_OK) {
+				ctx->stats.pkt2bus++;
+				if (ac_fifo_credit_spent) {
+					WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+					ctx->stats.sendq_pkts[ac]++;
+					credit++;
+				}
+			}
+			else {
+				/* bus commit has failed, rollback. */
+				rc = _dhd_wlfc_rollback_packet_toq(ctx,
+					p,
+					/* remove wl-header while rolling back */
+					eWLFC_PKTTYPE_NEW,
+					hslot);
+				if (rc != BCME_OK)
+					ctx->stats.rollback_failed++;
+			}
+		}
+		ctx->FIFO_credit[ac] -= credit;
+	}
+	return BCME_OK;
+}
+
+static uint8
+dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+{
+	wlfc_mac_descriptor_t* table =
+		((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+	uint8 table_index;
+
+	if (ea != NULL) {
+		for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+			if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+				table[table_index].occupied)
+				return table_index;
+		}
+	}
+	return WLFC_MAC_DESC_ID_INVALID;
+}
+
+void
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	void* p;
+
+	if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.signal_only_pkts_freed++;
+#endif
+		/* is this a signal-only packet? */
+		PKTFREE(wlfc->osh, txp, TRUE);
+		return;
+	}
+	if (!success) {
+		WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+			__FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+		dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG
+			(PKTTAG(txp))), &p, 1);
+
+		/* indicate failure and free the packet */
+		dhd_txcomplete(dhd, txp, FALSE);
+		PKTFREE(wlfc->osh, txp, TRUE);
+
+		/* return the credit, if necessary */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp)))
+			wlfc->FIFO_credit[DHD_PKTTAG_FIFO(PKTTAG(txp))]++;
+	}
+	return;
+}
+
+/* Handle discard or suppress indication */
+static int
+dhd_wlfc_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info)
+{
+	uint8 	status_flag;
+	uint32	status;
+	int		ret;
+	int		remove_from_hanger = 1;
+	void*	pktbuf;
+	uint8	fifo_id;
+	wlfc_mac_descriptor_t* entry = NULL;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+
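+	/* the 32-bit status echoes the htod tag: flags, hanger slot and generation */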
+	memcpy(&status, pkt_info, sizeof(uint32));
+	status_flag = WL_TXSTATUS_GET_FLAGS(status);
+	wlfc->stats.txstatus_in++;
+
+	if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+		wlfc->stats.pkt_freed++;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+		wlfc->stats.d11_suppress++;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+		wlfc->stats.wl_suppress++;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+		wlfc->stats.wlc_tossed_pkts++;
+	}
+
+	ret = dhd_wlfc_hanger_poppkt(wlfc->hanger,
+		WLFC_PKTID_HSLOT_GET(status), &pktbuf, remove_from_hanger);
+	if (ret != BCME_OK) {
+		/* could not retrieve the packet from the hanger; nothing more to do */
+		return ret;
+	}
+
+	if (!remove_from_hanger) {
+		/* this packet was suppressed */
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+		entry->generation = WLFC_PKTID_GEN(status);
+	}
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		uint32 new_t = OSL_SYSUPTIME();
+		uint32 old_t;
+		uint32 delta;
+		old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[
+			WLFC_PKTID_HSLOT_GET(status)].push_time;
+
+
+		wlfc->stats.latency_sample_count++;
+		if (new_t > old_t)
+			delta = new_t - old_t;
+		else
+			delta = 0xffffffff + new_t - old_t;
+		wlfc->stats.total_status_latency += delta;
+		wlfc->stats.latency_most_recent = delta;
+
+		wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+		if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+			wlfc->stats.idx_delta = 0;
+	}
+#endif /* PROP_TXSTATUS_DEBUG */
+
+	fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+	/* pick up the implicit credit from this packet */
+	if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+		if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) {
+			wlfc->FIFO_credit[fifo_id]++;
+		}
+	}
+	else {
+		/*
+		if this packet did not count against FIFO credit, it must have
+		taken a requested_credit from the destination entry (for pspoll etc.)
+		*/
+		if (!entry) {
+
+			entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+		}
+		if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+			entry->requested_credit++;
+#ifdef PROP_TXSTATUS_DEBUG
+		entry->dstncredit_acks++;
+#endif
+	}
+	if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+		(status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+		ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+		if (ret != BCME_OK) {
+			/* delay q is full, drop this packet */
+			dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(status),
+			&pktbuf, 1);
+
+			/* indicate failure and free the packet */
+			dhd_txcomplete(dhd, pktbuf, FALSE);
+			PKTFREE(wlfc->osh, pktbuf, TRUE);
+		}
+	}
+	else {
+		dhd_txcomplete(dhd, pktbuf, TRUE);
+		/* free the packet */
+		PKTFREE(wlfc->osh, pktbuf, TRUE);
+	}
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+	int i;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+		/* update FIFO credits */
+		if (wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+			wlfc->FIFO_credit[i] += credits[i];
+	}
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+	(void)dhd;
+	(void)rssi;
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	int rc;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 existing_index;
+	uint8 table_index;
+	uint8 ifid;
+	uint8* ea;
+
+	WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+		__FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+		((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+		WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+	table = wlfc->destination_entries.nodes;
+	table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+	ifid = value[1];
+	ea = &value[2];
+
+	if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+		existing_index = dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+		if (existing_index == WLFC_MAC_DESC_ID_INVALID) {
+			/* this MAC entry does not exist, create one */
+			if (!table[table_index].occupied) {
+				table[table_index].mac_handle = value[0];
+				rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+				eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+				wlfc->destination_entries.interfaces[ifid].iftype,
+				ea);
+			}
+			else {
+				/* the space should have been empty, but it's not */
+				wlfc->stats.mac_update_failed++;
+			}
+		}
+		else {
+			/*
+			there is an existing entry, move it to new index
+			if necessary.
+			*/
+			if (existing_index != table_index) {
+				/* if we already have an entry, free the old one */
+				table[existing_index].occupied = 0;
+				table[existing_index].state = WLFC_STATE_CLOSE;
+				table[existing_index].requested_credit = 0;
+				table[existing_index].interface_id = 0;
+			}
+		}
+	}
+	if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+		if (table[table_index].occupied) {
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+				eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+				wlfc->destination_entries.interfaces[ifid].iftype,
+				ea);
+		}
+		else {
+			/* the space should have been occupied, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle = value[0];
+	int i;
+
+	table = wlfc->destination_entries.nodes;
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		/* a fresh PS mode should wipe old ps credits? */
+		desc->requested_credit = 0;
+		if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+			desc->state = WLFC_STATE_OPEN;
+			DHD_WLFC_CTRINC_MAC_OPEN(desc);
+		}
+		else {
+			desc->state = WLFC_STATE_CLOSE;
+			DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+			/*
+			Indicate to firmware if there is any traffic pending.
+			*/
+			for (i = AC_BE; i < AC_COUNT; i++) {
+				_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+			}
+		}
+	}
+	else {
+		wlfc->stats.psmode_update_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle interface open/close indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 if_id = value[0];
+
+	if (if_id < WLFC_MAX_IFNUM) {
+		table = wlfc->destination_entries.interfaces;
+		if (table[if_id].occupied) {
+			if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+				table[if_id].state = WLFC_STATE_OPEN;
+				/* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+			}
+			else {
+				table[if_id].state = WLFC_STATE_CLOSE;
+				/* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+			}
+			return BCME_OK;
+		}
+	}
+	wlfc->stats.interface_update_failed++;
+
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 credit;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	credit = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_credit = credit;
+
+		desc->ac_bitmap = value[2];
+	}
+	else {
+		wlfc->stats.credit_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 packet_count;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	packet_count = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_packet = packet_count;
+
+		desc->ac_bitmap = value[2];
+	}
+	else {
+		wlfc->stats.packet_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len)
+{
+	uint8 type, len;
+	uint8* value;
+	uint8* tmpbuf;
+	uint16 remainder = tlv_hdr_len;
+	uint16 processed = 0;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
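+	/*
+	the signal area is a sequence of {type, len, value[len]} records;
+	WLFC_CTL_TYPE_FILLER is a single pad byte with no length field.
+	*/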
+	if (remainder) {
+		while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+			type = tmpbuf[processed];
+			if (type == WLFC_CTL_TYPE_FILLER) {
+				remainder -= 1;
+				processed += 1;
+				continue;
+			}
+
+			len  = tmpbuf[processed + 1];
+			value = &tmpbuf[processed + 2];
+
+			if (remainder < (2 + len))
+				break;
+
+			remainder -= 2 + len;
+			processed += 2 + len;
+			if (type == WLFC_CTL_TYPE_TXSTATUS)
+				dhd_wlfc_txstatus_update(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+				dhd_wlfc_fifocreditback_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_RSSI)
+				dhd_wlfc_rssi_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+				dhd_wlfc_credit_request(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+				dhd_wlfc_packet_request(dhd, value);
+
+			else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+				(type == WLFC_CTL_TYPE_MAC_CLOSE))
+				dhd_wlfc_psmode_update(dhd, value, type);
+
+			else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+				(type == WLFC_CTL_TYPE_MACDESC_DEL))
+				dhd_wlfc_mac_table_update(dhd, value, type);
+
+			else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+				(type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+				dhd_wlfc_interface_update(dhd, value, type);
+			}
+		}
+		if (remainder != 0) {
+			/* trouble..., something is not right */
+			wlfc->stats.tlv_parse_failed++;
+		}
+	}
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+	char iovbuf[12]; /* Room for "tlv" + '\0' + parameter */
+	/* enable all signals & indicate host proptxstatus logic is active */
+	uint32 tlv = dhd->wlfc_enabled?
+		WLFC_FLAGS_RSSI_SIGNALS |
+		WLFC_FLAGS_XONXOFF_SIGNALS |
+		WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+		WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE : 0;
+
+	dhd->wlfc_state  = NULL;
+
+	/*
+	try to enable/disable signaling by sending the "tlv" iovar. If that fails,
+	fall back to no flow control? Print a message for now.
+	*/
+
+	/* enable proptxtstatus signaling by default */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+	}
+	else {
+		/*
+		Leaving the message for now; it should be removed once
+		the tlv situation is stable.
+		*/
+		DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	}
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+	int i;
+	athost_wl_status_info_t* wlfc;
+
+	if (!dhd->wlfc_enabled || dhd->wlfc_state)
+		return BCME_OK;
+
+	/* allocate space to track txstatus propagated from firmware */
+	dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+	if (dhd->wlfc_state == NULL)
+		return BCME_NOMEM;
+
+	/* initialize state space */
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+	/* remember osh & dhdp */
+	wlfc->osh = dhd->osh;
+	wlfc->dhdp = dhd;
+
+	wlfc->hanger =
+		dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+	if (wlfc->hanger == NULL) {
+		MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+		dhd->wlfc_state = NULL;
+		return BCME_NOMEM;
+	}
+
+	/* initialize all interfaces to accept traffic */
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		wlfc->hostif_flow_state[i] = OFF;
+	}
+
+	/* 
+	create the SENDQ containing
+	sub-queues for all AC precedences + 1 for bc/mc traffic
+	*/
+	pktq_init(&wlfc->SENDQ, (AC_COUNT + 1), WLFC_SENDQ_LEN);
+
+	wlfc->destination_entries.other.state = WLFC_STATE_OPEN;
+	/* bc/mc FIFO is always open [credit aside], i.e. b[5] */
+	wlfc->destination_entries.other.ac_bitmap = 0x1f;
+	wlfc->destination_entries.other.interface_id = 0;
+
+	wlfc->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+
+	return BCME_OK;
+}
+
+/* release all packet resources */
+void
+dhd_wlfc_cleanup(dhd_pub_t *dhd)
+{
+	int i;
+	int total_entries;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_hanger_t* h;
+
+	if (dhd->wlfc_state == NULL)
+		return;
+
+	total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+	/* search all entries, include nodes as well as interfaces */
+	table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+	for (i = 0; i < total_entries; i++) {
+		if (table[i].occupied) {
+			if (table[i].psq.len) {
+				WLFC_DBGMESG(("%s(): DELAYQ[%d].len = %d\n",
+					__FUNCTION__, i, table[i].psq.len));
+				/* release packets held in DELAYQ */
+				pktq_flush(wlfc->osh, &table[i].psq, TRUE, NULL, 0);
+			}
+			table[i].occupied = 0;
+		}
+	}
+	/* release packets held in SENDQ */
+	if (wlfc->SENDQ.len)
+		pktq_flush(wlfc->osh, &wlfc->SENDQ, TRUE, NULL, 0);
+	/* any in the hanger? */
+	h = (wlfc_hanger_t*)wlfc->hanger;
+	for (i = 0; i < h->max_items; i++) {
+		if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+			PKTFREE(wlfc->osh, h->items[i].pkt, TRUE);
+		}
+	}
+	return;
+}
+
+void
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+	/* cleanup all psq related resources */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+
+	if (dhd->wlfc_state == NULL)
+		return;
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		int i;
+		wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+		for (i = 0; i < h->max_items; i++) {
+			if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+				WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+					__FUNCTION__, i, h->items[i].pkt,
+					DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+			}
+		}
+	}
+#endif
+	/* delete hanger */
+	dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+
+	/* free top structure */
+	MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+	dhd->wlfc_state = NULL;
+	return;
+}
+#endif /* PROP_TXSTATUS */
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+	if (dhdp->wlfc_state)
+		dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif /* BDC */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Push BDC header used to convey priority for buses that don't carry it natively */
+
+	PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN);
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(pktbuf))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = 0;
+#endif /* BDC */
+	BDC_SET_IF_IDX(h, ifidx);
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Pop BDC header used to convey priority for buses that don't carry it natively */
+
+	if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+		           __FUNCTION__, *ifidx));
+		return BCME_ERROR;
+	}
+
+	if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+		DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+		           dhd_ifname(dhd, *ifidx), h->flags));
+		if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+			h->dataOffset = 0;
+		else
+			return BCME_ERROR;
+	}
+
+	if (h->flags & BDC_FLAG_SUM_GOOD) {
+		DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+		          dhd_ifname(dhd, *ifidx), h->flags));
+		PKTSETSUMGOOD(pktbuf, TRUE);
+	}
+
+	PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+	PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+	if (PKTLEN(dhd->osh, pktbuf) < (uint32) (h->dataOffset << 2)) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), (h->dataOffset * 4)));
+		return BCME_ERROR;
+	}
+
+#ifdef PROP_TXSTATUS
+	if (dhd->wlfc_state &&
+		((athost_wl_status_info_t*)dhd->wlfc_state)->proptxstatus_mode
+		!= WLFC_FCMODE_NONE &&
+		(!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf)))) {
+		/*
+		- parse txstatus only for packets that came from the firmware
+		*/
+		dhd_os_wlfc_block(dhd);
+		dhd_wlfc_parse_header_info(dhd, pktbuf, (h->dataOffset << 2));
+		((athost_wl_status_info_t*)dhd->wlfc_state)->stats.dhd_hdrpulls++;
+		dhd_wlfc_commit_packets(dhd->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
+			dhd->bus);
+		dhd_os_wlfc_unblock(dhd);
+	}
+#endif /* PROP_TXSTATUS */
+	PKTPULL(dhd->osh, pktbuf, (h->dataOffset << 2));
+	return 0;
+}
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *cdc;
+
+	if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd->osh, DHD_PREALLOC_PROT,
+		sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(cdc, 0, sizeof(dhd_prot_t));
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+		DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+		goto fail;
+	}
+
+	dhd->prot = cdc;
+#ifdef BDC
+	dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+	dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+	return 0;
+
+fail:
+#ifndef DHD_USE_STATIC_BUF
+	if (cdc != NULL)
+		MFREE(dhd->osh, cdc, sizeof(dhd_prot_t));
+#endif
+	return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore?  Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_deinit(dhd);
+#endif
+#ifndef DHD_USE_STATIC_BUF
+	MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif
+	dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+	/* No stats from dongle added yet, copy bus stats */
+	dhd->dstats.tx_packets = dhd->tx_packets;
+	dhd->dstats.tx_errors = dhd->tx_errors;
+	dhd->dstats.rx_packets = dhd->rx_packets;
+	dhd->dstats.rx_errors = dhd->rx_errors;
+	dhd->dstats.rx_dropped = dhd->rx_dropped;
+	dhd->dstats.multicast = dhd->rx_multicast;
+	return;
+}
+
+int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+	int power_mode = PM_MAX;
+	wl_pkt_filter_enable_t	enable_parm;
+	char iovbuf[32];
+	int bcn_li_dtim = 3;
+
+#define htod32(i) i
+
+	if (dhd && dhd->up) {
+		if (value) {
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
+				(char *)&power_mode, sizeof(power_mode), TRUE, 0);
+			/* Enable packet filter, only allow unicast packet to send up */
+			enable_parm.id = htod32(100);
+			enable_parm.enable = htod32(1);
+			bcm_mkiovar("pkt_filter_enable", (char *)&enable_parm,
+				sizeof(wl_pkt_filter_enable_t), iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			/* set bcn_li_dtim */
+			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+				4, iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+		} else {
+			power_mode = PM_FAST;
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				sizeof(power_mode), TRUE, 0);
+			/* disable pkt filter */
+			enable_parm.id = htod32(100);
+			enable_parm.enable = htod32(0);
+			bcm_mkiovar("pkt_filter_enable", (char *)&enable_parm,
+				sizeof(wl_pkt_filter_enable_t), iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			/* set bcn_li_dtim */
+			bcn_li_dtim = 0;
+			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+				4, iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+		}
+	}
+
+	return 0;
+}
+
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0)
+		goto done;
+
+
+	ret = dhd_preinit_ioctls(dhd);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+
+#ifdef PROP_TXSTATUS
+	ret = dhd_wlfc_init(dhd);
+#endif
+	goto done;
+
+done:
+	return ret;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+	/* Nothing to do for CDC */
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
new file mode 100644
index 0000000..1fbc283
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -0,0 +1,2551 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c,v 1.57.2.22 2011-02-01 18:38:37 Exp $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+
+#include <proto/bcmevent.h>
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#ifdef SET_RANDOM_MAC_SOFTAP
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#endif
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+int dhd_msg_level = DHD_ERROR_VAL;
+
+
+#include <wl_iw.h>
+
+char fw_path[MOD_PARAM_PATHLEN];
+char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on);
+#endif /* KEEP_ALIVE */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#ifdef DHD_DEBUG
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
+	__DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#endif
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+
+
+/* IOVar table */
+enum {
+	IOV_VERSION = 1,
+	IOV_MSGLEVEL,
+	IOV_BCMERRORSTR,
+	IOV_BCMERROR,
+	IOV_WDTICK,
+	IOV_DUMP,
+	IOV_CLEARCOUNTS,
+	IOV_LOGDUMP,
+	IOV_LOGCAL,
+	IOV_LOGSTAMP,
+	IOV_GPIOOB,
+	IOV_IOCTLTIMEOUT,
+	IOV_HCI_CMD,		/* HCI command */
+	IOV_HCI_ACL_DATA,	/* HCI data packet */
+#if defined(DHD_DEBUG)
+	IOV_CONS,
+	IOV_DCONSOLE_POLL,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+	IOV_PROPTXSTATUS_ENABLE,
+	IOV_PROPTXSTATUS_MODE,
+#endif
+	IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+	IOV_WLPKTDLYSTAT_SZ,
+#endif
+	IOV_CHANGEMTU,
+	IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+	{"version", 	IOV_VERSION,	0,	IOVT_BUFFER,	sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+	{"msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG */
+	{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER,	BCME_STRLEN },
+	{"bcmerror",	IOV_BCMERROR,	0,	IOVT_INT8,	0 },
+	{"wdtick",	IOV_WDTICK, 0,	IOVT_UINT32,	0 },
+	{"dump",	IOV_DUMP,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+	{"cons",	IOV_CONS,	0,	IOVT_BUFFER,	0 },
+	{"dconpoll",	IOV_DCONSOLE_POLL, 0,	IOVT_UINT32,	0 },
+#endif
+	{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID,	0 },
+	{"gpioob",	IOV_GPIOOB,	0,	IOVT_UINT32,	0 },
+	{"ioctl_timeout",	IOV_IOCTLTIMEOUT,	0,	IOVT_UINT32,	0 },
+	{"HCI_cmd",	IOV_HCI_CMD,	0,	IOVT_BUFFER,	0},
+	{"HCI_ACL_data", IOV_HCI_ACL_DATA, 0,	IOVT_BUFFER,	0},
+#ifdef PROP_TXSTATUS
+	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	IOVT_UINT32,	0 },
+	/*
+	set the proptxtstatus operation mode:
+	0 - Do not do any proptxtstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	*/
+	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	IOVT_UINT32,	0 },
+#endif
+	{"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+	{"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
+#endif
+	{"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+struct dhd_cmn *
+dhd_common_init(osl_t *osh)
+{
+	dhd_cmn_t *cmn;
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	/* Allocate private bus interface state */
+	if (!(cmn = MALLOC(osh, sizeof(dhd_cmn_t)))) {
+		DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
+		return NULL;
+	}
+	memset(cmn, 0, sizeof(dhd_cmn_t));
+	cmn->osh = osh;
+
+#ifdef CONFIG_BCMDHD_FW_PATH
+	bcm_strncpy_s(fw_path, sizeof(fw_path), CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
+#else /* CONFIG_BCMDHD_FW_PATH */
+	fw_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+	bcm_strncpy_s(nv_path, sizeof(nv_path), CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+#else /* CONFIG_BCMDHD_NVRAM_PATH */
+	nv_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+#ifdef SOFTAP
+	fw_path2[0] = '\0';
+#endif
+	return cmn;
+}
+
+void
+dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn)
+{
+	osl_t *osh;
+	dhd_cmn_t *cmn;
+
+	if (dhd_pub != NULL)
+		cmn = dhd_pub->cmn;
+	else
+		cmn = sa_cmn;
+
+	if (!cmn)
+		return;
+
+	osh = cmn->osh;
+
+	if (dhd_pub != NULL)
+		dhd_pub->cmn = NULL;
+	MFREE(osh, cmn, sizeof(dhd_cmn_t));
+}
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	/* Base DHD info */
+	bcm_bprintf(strbuf, "%s\n", dhd_version);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+	            dhdp->up, dhdp->txoff, dhdp->busstate);
+	bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
+	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+	            dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt);
+
+	bcm_bprintf(strbuf, "dongle stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
+	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+	bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
+	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+	bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
+
+	bcm_bprintf(strbuf, "bus stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
+	            dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+	bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
+	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+	bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n",
+	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+	bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld\n",
+	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+	bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld\n",
+	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any prot info */
+	dhd_prot_dump(dhdp, strbuf);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any bus info */
+	dhd_bus_dump(dhdp, strbuf);
+
+	return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex)
+{
+	wl_ioctl_t ioc;
+
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len);
+}
+
+
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
+{
+	int ret;
+
+	dhd_os_proto_block(dhd_pub);
+
+	ret = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len);
+
+
+	dhd_os_proto_unblock(dhd_pub);
+	return ret;
+}
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+            void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_VERSION):
+		/* Need to have checked buffer length */
+		bcm_strncpy_s((char*)arg, len, dhd_version, len);
+		break;
+
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)dhd_msg_level;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		dhd_msg_level = int_val;
+		break;
+	case IOV_GVAL(IOV_BCMERRORSTR):
+		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+		((char *)arg)[BCME_STRLEN - 1] = 0x00;
+		break;
+
+	case IOV_GVAL(IOV_BCMERROR):
+		int_val = (int32)dhd_pub->bcmerror;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_WDTICK):
+		int_val = (int32)dhd_watchdog_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WDTICK):
+		if (!dhd_pub->up) {
+			bcmerror = BCME_NOTUP;
+			break;
+		}
+		dhd_os_wd_timer(dhd_pub, (uint)int_val);
+		break;
+
+	case IOV_GVAL(IOV_DUMP):
+		bcmerror = dhd_dump(dhd_pub, arg, len);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_DCONSOLE_POLL):
+		int_val = (int32)dhd_console_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DCONSOLE_POLL):
+		dhd_console_ms = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_CONS):
+		if (len > 0)
+			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+		break;
+#endif /* DHD_DEBUG */
+
+	case IOV_SVAL(IOV_CLEARCOUNTS):
+		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+		dhd_pub->rx_dropped = 0;
+		dhd_pub->rx_readahead_cnt = 0;
+		dhd_pub->tx_realloc = 0;
+		dhd_pub->wd_dpc_sched = 0;
+		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+		dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+		/* clear proptxstatus related counters */
+		if (dhd_pub->wlfc_state) {
+			athost_wl_status_info_t *wlfc =
+			        (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+			wlfc_hanger_t* hanger;
+
+			memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+			hanger = (wlfc_hanger_t*)wlfc->hanger;
+			hanger->pushed = 0;
+			hanger->popped = 0;
+			hanger->failed_slotfind = 0;
+			hanger->failed_to_pop = 0;
+			hanger->failed_to_push = 0;
+		}
+#endif /* PROP_TXSTATUS */
+		break;
+
+
+	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+		break;
+	}
+
+	case IOV_SVAL(IOV_HCI_CMD): {
+		amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg;
+
+		/* sanity check: command preamble present */
+		if (len < HCI_CMD_PREAMBLE_SIZE)
+			return BCME_BUFTOOSHORT;
+
+		/* sanity check: command parameters are present */
+		if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen))
+			return BCME_BUFTOOSHORT;
+
+		dhd_bta_docmd(dhd_pub, cmd, len);
+		break;
+	}
+
+	case IOV_SVAL(IOV_HCI_ACL_DATA): {
+		amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg;
+
+		/* sanity check: HCI header present */
+		if (len < HCI_ACL_DATA_PREAMBLE_SIZE)
+			return BCME_BUFTOOSHORT;
+
+		/* sanity check: ACL data is present */
+		if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen))
+			return BCME_BUFTOOSHORT;
+
+		dhd_bta_tx_hcidata(dhd_pub, ACL_data, len);
+		break;
+	}
+
+#ifdef PROP_TXSTATUS
+	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE):
+		int_val = dhd_pub->wlfc_enabled? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE):
+		dhd_pub->wlfc_enabled = int_val? 1 : 0;
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODE): {
+		athost_wl_status_info_t *wlfc =
+		        (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+		int_val = dhd_pub->wlfc_state ? (int32)wlfc->proptxstatus_mode : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+		if (dhd_pub->wlfc_state) {
+			athost_wl_status_info_t *wlfc =
+			        (athost_wl_status_info_t*)dhd_pub->wlfc_state;
+			wlfc->proptxstatus_mode = int_val & 0xff;
+		}
+		break;
+#endif /* PROP_TXSTATUS */
+
+	case IOV_GVAL(IOV_BUS_TYPE):
+	/* The dhd application queries the driver to check whether the bus is USB or SDIO. */
+#ifdef BCMDHDUSB
+		int_val = BUS_TYPE_USB;
+#else
+		int_val = BUS_TYPE_SDIO;
+#endif
+		bcopy(&int_val, arg, val_size);
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+		int_val = dhd_pub->htsfdlystat_sz;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+		dhd_pub->htsfdlystat_sz = int_val & 0xff;
+		printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+		break;
+#endif
+	case IOV_SVAL(IOV_CHANGEMTU):
+		int_val &= 0xffff;
+		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+	return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+	      dhd_conn_event == WLC_E_PRUNE)) {
+		dhd_conn_event = event;
+		dhd_conn_status = status;
+		dhd_conn_reason = reason;
+	}
+}
+
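+/* Enqueue 'pkt' at precedence 'prec'.  When the queue is full a packet is
+ * evicted from an equal or lower precedence; the per-AC bit in dhdp->wme_dp
+ * selects whether the oldest or the newest packet of that precedence is
+ * dropped.  Returns TRUE if the packet was enqueued, FALSE if it was refused.
+ */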
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	void *p;
+	int eprec = -1;		/* precedence to evict from */
+	bool discard_oldest;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(q, prec))
+		eprec = prec;
+	else if (pktq_full(q)) {
+		p = pktq_peek_tail(q, &eprec);
+		ASSERT(p);
+		if (eprec > prec || eprec < 0)
+			return FALSE;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(q, eprec));
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+		if (eprec == prec && !discard_oldest)
+			return FALSE;		/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+		ASSERT(p);
+
+		PKTFREE(dhdp->osh, p, TRUE);
+	}
+
+	/* Enqueue */
+	p = pktq_penq(q, prec, pkt);
+	ASSERT(p);
+
+	return TRUE;
+}
+
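+/* Look up 'name' in the generic dhd_iovars table and dispatch to dhd_doiovar().
+ * Returns BCME_UNSUPPORTED when the variable is not in the table, so that
+ * dhd_ioctl() can fall back to the protocol and bus modules.
+ */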
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+	int val_size;
+	const bcm_iovar_t *vi = NULL;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
+{
+	int bcmerror = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) {
+		return BCME_BADARG;
+	}
+
+	switch (ioc->cmd) {
+	case DHD_GET_MAGIC:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_MAGIC;
+		break;
+
+	case DHD_GET_VERSION:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_VERSION;
+		break;
+
+	case DHD_GET_VAR:
+	case DHD_SET_VAR: {
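+		/* The buffer holds the NUL-terminated variable name followed by any
+		 * argument bytes.  The name is resolved against the generic iovar
+		 * table first, then the protocol module, then the bus module.
+		 */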
+		char *arg;
+		uint arglen;
+
+		/* scan past the name to any arguments */
+		for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+			;
+
+		if (*arg) {
+			bcmerror = BCME_BUFTOOSHORT;
+			break;
+		}
+
+		/* account for the NUL terminator */
+		arg++, arglen--;
+
+		/* call with the appropriate arguments */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+			buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* not in generic table, try protocol module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+				arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* if still not found, try bus module */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				arg, arglen, buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		}
+
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+static void
+wl_show_host_event(wl_event_msg_t *event, void *event_data)
+{
+	uint i, status, reason;
+	bool group = FALSE, flush_txq = FALSE, link = FALSE;
+	const char *auth_str;
+	const char *event_name;
+	uchar *buf;
+	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+	uint event_type, flags, auth_type, datalen;
+
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	reason = ntoh32(event->reason);
+	auth_type = ntoh32(event->auth_type);
+	datalen = ntoh32(event->datalen);
+
+	/* debug dump of event messages */
+	sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x",
+	        (uchar)event->addr.octet[0]&0xff,
+	        (uchar)event->addr.octet[1]&0xff,
+	        (uchar)event->addr.octet[2]&0xff,
+	        (uchar)event->addr.octet[3]&0xff,
+	        (uchar)event->addr.octet[4]&0xff,
+	        (uchar)event->addr.octet[5]&0xff);
+
+	event_name = "UNKNOWN";
+	for (i = 0; i < (uint)bcmevent_names_size; i++)
+		if (bcmevent_names[i].event == event_type)
+			event_name = bcmevent_names[i].name;
+
+	if (flags & WLC_EVENT_MSG_LINK)
+		link = TRUE;
+	if (flags & WLC_EVENT_MSG_GROUP)
+		group = TRUE;
+	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+		flush_txq = TRUE;
+
+	switch (event_type) {
+	case WLC_E_START:
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC:
+	case WLC_E_REASSOC:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+			       event_name, eabuf, (int)reason));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+			       event_name, eabuf, (int)status));
+		}
+		break;
+
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else {
+			sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+			       event_name, eabuf, auth_str, (int)reason));
+		}
+
+		break;
+
+	case WLC_E_JOIN:
+	case WLC_E_ROAM:
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+				event_name, (int)status));
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+		       event_name, eabuf, group, flush_txq));
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+		       event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+	case WLC_E_PFN_SCAN_NONE:
+	case WLC_E_PFN_SCAN_ALLGONE:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+		           event_name, (int)status, (int)reason));
+		break;
+
+#ifdef WIFI_ACT_FRAME
+	case WLC_E_ACTION_FRAME:
+		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+		break;
+#endif /* WIFI_ACT_FRAME */
+
+	case WLC_E_TRACE: {
+		static uint32 seqnum_prev = 0;
+		msgtrace_hdr_t hdr;
+		uint32 nblost;
+		char *s, *p;
+
+		buf = (uchar *) event_data;
+		memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+		if (hdr.version != MSGTRACE_VERSION) {
+			printf("\nMACEVENT: %s [unsupported version --> "
+			       "dhd version:%d dongle version:%d]\n",
+			       event_name, MSGTRACE_VERSION, hdr.version);
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+			break;
+		}
+
+		/* There are 2 bytes available at the end of data */
+		buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+		if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+			printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
+			       "discarded_bytes %d discarded_printf %d]\n",
+			       ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+		}
+
+		nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+		if (nblost > 0) {
+			printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
+			       ntoh32(hdr.seqnum), nblost);
+		}
+		seqnum_prev = ntoh32(hdr.seqnum);
+
+		/* Display the trace buffer. Advance from \n to \n to avoid display big
+		 * printf (issue with Linux printk )
+		 */
+		p = (char *)&buf[MSGTRACE_HDRLEN];
+		while ((s = strstr(p, "\n")) != NULL) {
+			*s = '\0';
+			printf("%s\n", p);
+			p = s+1;
+		}
+		printf("%s\n", p);
+
+		/* Reset datalen to avoid display below */
+		datalen = 0;
+		break;
+	}
+
+
+	case WLC_E_RSSI:
+		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		break;
+
+	default:
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+		       event_name, event_type, eabuf, (int)status, (int)reason,
+		       (int)auth_type));
+		break;
+	}
+
+	/* show any appended data */
+	if (datalen) {
+		buf = (uchar *) event_data;
+		DHD_EVENT((" data (%d) : ", datalen));
+		for (i = 0; i < datalen; i++)
+			DHD_EVENT((" 0x%02x ", *buf++));
+		DHD_EVENT(("\n"));
+	}
+}
+#endif /* SHOW_EVENTS */
+
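+/* Validate a received BRCM event packet (OUI and BCM subtype), copy the event
+ * header out of the possibly unaligned frame, perform per-event handling such
+ * as interface add/delete, and hand the event up to the per-port code.
+ */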
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+              wl_event_msg_t *event, void **data_ptr)
+{
+	/* check whether packet is a BRCM event pkt */
+	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+	char *event_data;
+	uint32 type, status, reason, datalen;
+	uint16 flags;
+	int evlen;
+
+	if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+		DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+	if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+		DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	*data_ptr = &pvt_data[1];
+	event_data = *data_ptr;
+
+	/* memcpy since BRCM event pkt may be unaligned. */
+	memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+	type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+	reason = ntoh32_ua((void *)&event->reason);
+	datalen = ntoh32_ua((void *)&event->datalen);
+	evlen = datalen + sizeof(bcm_event_t);
+
+	switch (type) {
+#ifdef PROP_TXSTATUS
+	case WLC_E_FIFO_CREDIT_MAP:
+		dhd_wlfc_event(dhd_pub->info);
+		dhd_wlfc_FIFOcreditmap_event(dhd_pub->info, event_data);
+		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+			event_data[2],
+			event_data[3], event_data[4], event_data[5]));
+		break;
+#endif
+
+	case WLC_E_IF:
+		{
+		dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
+#ifdef PROP_TXSTATUS
+{
+		uint8* ea = pvt_data->eth.ether_dhost;
+		WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+		              "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+		              ifevent->ifidx,
+		              ((ifevent->action == WLC_E_IF_ADD) ? "ADD":"DEL"),
+		              ((ifevent->is_AP == 0) ? "STA":"AP "),
+		              ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+		(void)ea;
+
+		dhd_wlfc_interface_event(dhd_pub->info,
+		                         ((ifevent->action == WLC_E_IF_ADD) ?
+		                          eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+		                         ifevent->ifidx, ifevent->is_AP, ea);
+
+		/* dhd already has created an interface by default, for 0 */
+		if (ifevent->ifidx == 0)
+			break;
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef WL_CFG80211
+		if (wl_cfg80211_is_progress_ifchange()) {
+			DHD_ERROR(("%s:  ifidx %d for %s action %d\n",
+				__FUNCTION__, ifevent->ifidx,
+				event->ifname, ifevent->action));
+			if (ifevent->action == WLC_E_IF_ADD)
+				wl_cfg80211_notify_ifchange();
+			return (BCME_OK);
+		}
+#endif /* WL_CFG80211 */
+		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+			if (ifevent->action == WLC_E_IF_ADD) {
+				if (dhd_add_if(dhd_pub->info, ifevent->ifidx,
+					NULL, event->ifname, event->addr.octet,
+					ifevent->flags, ifevent->bssidx)) {
+					DHD_ERROR(("%s: dhd_add_if failed!! ifidx: %d for %s\n",
+						__FUNCTION__, ifevent->ifidx, event->ifname));
+					return (BCME_ERROR);
+				}
+			} else
+				dhd_del_if(dhd_pub->info, ifevent->ifidx);
+		} else {
+#ifndef PROP_TXSTATUS
+			DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+			           __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS */
+		}
+		}
+		/* send up the if event: btamp user needs it */
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case WLC_E_HTSFSYNC:
+		htsf_update(dhd_pub->info, event_data);
+		break;
+#endif /* WLMEDIA_HTSF */
+	case WLC_E_NDIS_LINK: {
+		uint32 temp = hton32(WLC_E_LINK);
+
+		memcpy((void *)(&pvt_data->event.event_type), &temp,
+		       sizeof(pvt_data->event.event_type));
+	}
+		/* These are what external supplicant/authenticator wants */
+		/* fall through */
+	case WLC_E_LINK:
+	case WLC_E_DEAUTH:
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+		/* fall through */
+	default:
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+
+		/* put it back to WLC_E_NDIS_LINK */
+		if (type == WLC_E_NDIS_LINK) {
+			uint32 temp;
+
+			temp = ntoh32_ua((void *)&event->event_type);
+			DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
+
+			temp = ntoh32(WLC_E_NDIS_LINK);
+			memcpy((void *)(&pvt_data->event.event_type), &temp,
+			       sizeof(pvt_data->event.event_type));
+		}
+		break;
+	}
+
+#ifdef SHOW_EVENTS
+	wl_show_host_event(event, (void *)event_data);
+#endif /* SHOW_EVENTS */
+
+	return (BCME_OK);
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+	/* Event struct members passed from dongle to host are stored in network
+	 * byte order. Convert all members to host-order.
+	 */
+	evt->event_type = ntoh32(evt->event_type);
+	evt->flags = ntoh16(evt->flags);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+	evt->version = ntoh16(evt->version);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+	int i, j = 0;
+	unsigned char *buf = pbuf;
+
+	if (bytes_per_line == 0) {
+		bytes_per_line = len;
+	}
+
+	for (i = 0; i < len; i++) {
+		printf("%2.2x", *buf++);
+		j++;
+		if (j == bytes_per_line) {
+			printf("\n");
+			j = 0;
+		} else {
+			printf(":");
+		}
+	}
+	printf("\n");
+#endif /* DHD_DEBUG */
+}
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+/* Convert user's input in hex pattern to byte-size mask */
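+/* For example, "0xff00" is converted to the two bytes { 0xff, 0x00 } and 2 is
+ * returned; -1 is returned if the "0x" prefix is missing or the digit count is odd.
+ */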
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 &&
+	    strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2; /* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		bcm_strncpy_s(num, sizeof(num), src, 2);
+		num[2] = '\0';
+		dst[i] = (uint8)strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+	char				*argv[8];
+	int					i = 0;
+	const char 			*str;
+	int					buf_len;
+	int					str_len;
+	char				*arg_save = 0, *arg_org = 0;
+	int					rc;
+	char				buf[128];
+	wl_pkt_filter_enable_t	enable_parm;
+	wl_pkt_filter_enable_t	* pkt_filterp;
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	arg_org = arg_save;
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_enable";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, sizeof(buf), str, str_len);
+	buf[str_len] = '\0';
+	buf_len = str_len + 1;
+
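+	/* Iovar buffer layout: "pkt_filter_enable\0" followed by a
+	 * wl_pkt_filter_enable_t carrying the filter id and the enable flag.
+	 */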
+	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+	/* Parse enable/disable value. */
+	enable_parm.enable = htod32(enable);
+
+	buf_len += sizeof(enable_parm);
+	memcpy((char *)pkt_filterp,
+	       &enable_parm,
+	       sizeof(enable_parm));
+
+	/* Enable/disable the specified filter. */
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+	/* Control the master mode */
+	bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+	const char 			*str;
+	wl_pkt_filter_t		pkt_filter;
+	wl_pkt_filter_t		*pkt_filterp;
+	int					buf_len;
+	int					str_len;
+	int 				rc;
+	uint32				mask_size;
+	uint32				pattern_size;
+	char				*argv[8], * buf = 0;
+	int					i = 0;
+	char				*arg_save = 0, *arg_org = 0;
+#define BUF_SIZE		2048
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	arg_org = arg_save;
+
+	if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	if (strlen(arg) > BUF_SIZE) {
+		DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+		goto fail;
+	}
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+	while (argv[i++])
+		argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+	buf[ str_len ] = '\0';
+	buf_len = str_len + 1;
+
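+	/* Iovar buffer layout: "pkt_filter_add\0", then the wl_pkt_filter_t
+	 * header, then mask_size mask bytes followed by mask_size pattern bytes.
+	 */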
+	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Polarity not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Filter type not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Offset not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter offset. */
+	pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Bitmask not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter mask. */
+	mask_size =
+		htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Pattern not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter pattern. */
+	pattern_size =
+		htod32(wl_pattern_atoh(argv[i],
+	         (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+	if (mask_size != pattern_size) {
+		DHD_ERROR(("Mask and pattern not the same size\n"));
+		goto fail;
+	}
+
+	pkt_filter.u.pattern.size_bytes = mask_size;
+	buf_len += WL_PKT_FILTER_FIXED_LEN;
+	buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+	/* Filter attributes are set in the local variable (pkt_filter), and
+	** then memcpy'ed into the buffer (pkt_filterp) since there is no
+	** guarantee that the buffer is properly aligned.
+	*/
+	memcpy((char *)pkt_filterp,
+	       &pkt_filter,
+	       WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+	if (buf)
+		MFREE(dhd->osh, buf, BUF_SIZE);
+}
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+	char iovbuf[32];
+	int retcode;
+
+	bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+		__FUNCTION__, arp_mode, retcode));
+	else
+		DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+		__FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+	char iovbuf[32];
+	int retcode;
+
+	bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+		__FUNCTION__, arp_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+		__FUNCTION__, arp_enable));
+}
+
+void dhd_aoe_arp_clr(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[128];
+
+	if (dhd == NULL) return;
+
+	iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[128];
+
+	if (dhd == NULL) return;
+
+	iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr)
+{
+	int iov_len = 0;
+	char iovbuf[32];
+	int retcode;
+
+	iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf));
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+
+	if (retcode)
+		DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: sARP H ipaddr entry added \n",
+		__FUNCTION__));
+}
+
+
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen)
+{
+#define MAX_IPV4_ENTRIES 8
+
+	int retcode, i;
+	int iov_len = 0;
+	uint32 *ptr32 = buf;
+	bool clr_bottom = FALSE;
+
+	if (!buf)
+		return -1;
+
+	iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, TRUE, 0);
+
+	/* clean up the buffer: zero out any stale entries after the first empty slot */
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+
+		if (!clr_bottom) {
+			if (*ptr32 == 0)
+			clr_bottom = TRUE;
+		} else {
+			*ptr32 = 0;
+		}
+		ptr32++;
+	}
+
+	if (retcode) {
+		DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+		__FUNCTION__, retcode));
+
+		return -1;
+	}
+	return 0;
+}
+#endif /* ARP_OFFLOAD_SUPPORT  */
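+/* Apply the default firmware configuration once the dongle is up: MAC address,
+ * country code, listen interval, power-save mode, bus alignment, beacon timeout,
+ * the event mask, scan times, ARP offload and the default packet filter.
+ */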
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+
+	uint up = 0;
+	uint power_mode = PM_FAST;
+	uint32 dongle_align = DHD_SDALIGN;
+	uint32 glom = 0;
+	uint bcn_timeout = 4;
+	int arpoe = 1;
+	int arp_ol = 0xf;
+	int scan_assoc_time = 40;
+	int scan_unassoc_time = 40;
+	const char 				*str;
+	wl_pkt_filter_t		pkt_filter;
+	wl_pkt_filter_t		*pkt_filterp;
+	int						buf_len;
+	int						str_len;
+	uint32					mask_size;
+	uint32					pattern_size;
+	char buf[WLC_IOCTL_SMLEN];
+	char *ptr;
+	uint filter_mode = 1;
+	uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#if defined(SOFTAP)
+	uint dtim = 1;
+#endif
+#ifdef AP
+	uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
+	uint32 apsta = 1; /* Enable APSTA mode */
+#endif
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	ret = dhd_custom_get_mac_address(ea_addr.octet);
+	if (!ret) {
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+	} else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+		/* Get the default device MAC address directly from firmware */
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+			FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+		/* Update public MAC address after reading from Firmware */
+		memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+#ifdef GET_CUSTOM_MAC_ENABLE
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+	if (strstr(fw_path, "apsta") != NULL) {
+		uint rand_mac;
+
+		srandom32((uint)jiffies);
+		rand_mac = random32();
+		iovbuf[0] = 0x02;              /* locally administered bit */
+		iovbuf[1] = 0x1A;
+		iovbuf[2] = 0x11;
+		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+		iovbuf[4] = (unsigned char)(rand_mac >> 8);
+		iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+		bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+	}
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+	DHD_ERROR(("Firmware up: Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+	       dhd->mac.octet[0], dhd->mac.octet[1], dhd->mac.octet[2],
+	       dhd->mac.octet[3], dhd->mac.octet[4], dhd->mac.octet[5]));
+
+	/* Set Country code  */
+	if (dhd->dhd_cspec.ccode[0] != 0) {
+		bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
+			sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+	}
+
+	/* Set Listen Interval */
+	bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		bcmstrtok(&ptr, "\n", 0);
+		/* Print fw version info */
+		DHD_ERROR(("Firmware version = %s\n", buf));
+	}
+	/* Set PowerSave mode */
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+	/* Match Host and Dongle rx alignment */
+	bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	/* disable glom option per default */
+	bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	/* Setup timeout if Beacons are lost and roam is off to report link down */
+	bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#ifdef AP
+	/* Turn off MPC in AP mode */
+	bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	/* Enable APSTA mode */
+	bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif
+#if defined(SOFTAP)
+	if (ap_fw_loaded == TRUE) {
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+	}
+#endif
+
+#if defined(KEEP_ALIVE)
+	{
+	/* Set Keep Alive : be sure to use FW with -keepalive */
+	int res;
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == FALSE)
+#endif
+		if ((res = dhd_keep_alive_onoff(dhd, 1)) < 0)
+			DHD_ERROR(("%s set keeplive failed %d\n",
+			__FUNCTION__, res));
+	}
+#endif /* defined(KEEP_ALIVE) */
+
+	/* Force STA UP */
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
+	if (ret < 0)
+		goto done;
+
+	/* Setup event_msgs */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	if (ret < 0)
+		goto done;
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+	/* Enable the events the host needs to receive */
+	setbit(eventmask, WLC_E_SET_SSID);
+	setbit(eventmask, WLC_E_PRUNE);
+	setbit(eventmask, WLC_E_AUTH);
+	setbit(eventmask, WLC_E_REASSOC);
+	setbit(eventmask, WLC_E_REASSOC_IND);
+	setbit(eventmask, WLC_E_DEAUTH_IND);
+	setbit(eventmask, WLC_E_DISASSOC_IND);
+	setbit(eventmask, WLC_E_DISASSOC);
+	setbit(eventmask, WLC_E_JOIN);
+	setbit(eventmask, WLC_E_ASSOC_IND);
+	setbit(eventmask, WLC_E_PSK_SUP);
+	setbit(eventmask, WLC_E_LINK);
+	setbit(eventmask, WLC_E_NDIS_LINK);
+	setbit(eventmask, WLC_E_MIC_ERROR);
+	setbit(eventmask, WLC_E_PMKID_CACHE);
+	setbit(eventmask, WLC_E_TXFAIL);
+	setbit(eventmask, WLC_E_JOIN_START);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+#ifdef WLMEDIA_HTSF
+	setbit(eventmask, WLC_E_HTSFSYNC);
+#endif
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+		sizeof(scan_assoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+		sizeof(scan_unassoc_time), TRUE, 0);
+
+	/* Set ARP offload */
+	bcm_mkiovar("arpoe", (char *)&arpoe, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	bcm_mkiovar("arp_ol", (char *)&arp_ol, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	/* add a default packet filter pattern */
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[ str_len ] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(100);
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(0);
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(0);
+
+	/* Parse pattern filter offset. */
+	pkt_filter.u.pattern.offset = htod32(0);
+
+	/* Parse pattern filter mask. */
+	mask_size =	htod32(wl_pattern_atoh("0x01",
+		(char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+	/* Parse pattern filter pattern. */
+	pattern_size = htod32(wl_pattern_atoh("0x00",
+		(char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+	if (mask_size != pattern_size) {
+		DHD_ERROR(("Mask and pattern not the same size\n"));
+		return -EINVAL;
+	}
+
+	pkt_filter.u.pattern.size_bytes = mask_size;
+	buf_len += WL_PKT_FILTER_FIXED_LEN;
+	buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+	/* Filter attributes are set in the local variable (pkt_filter), and
+	** then memcpy'ed into the buffer (pkt_filterp) since there is no
+	** guarantee that the buffer is properly aligned.
+	*/
+	memcpy((char *)pkt_filterp, &pkt_filter,
+		WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+	/* set mode to allow pattern */
+	bcm_mkiovar("pkt_filter_mode", (char *)&filter_mode, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* Set and enable ARP offload feature for STA only  */
+	if (dhd_arp_enable && !ap_fw_loaded) {
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+		dhd_arp_offload_enable(dhd, dhd_arp_enable);
+	} else {
+		dhd_arp_offload_set(dhd, 0);
+		dhd_arp_offload_enable(dhd, FALSE);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	if (ap_fw_loaded) {
+		int i;
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+				0, dhd_master_mode);
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+
+
+done:
+	return ret;
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+	case WLC_E_BTA_HCI_EVENT:
+		break;
+	default:
+		break;
+	}
+
+	/* Call per-port handler. */
+	dhd_sendup_event(dhdp, event, data);
+}
+
+
+
+#ifdef SIMPLE_ISCAN
+
+uint iscan_thread_id = 0;
+iscan_buf_t * iscan_chain = 0;
+
+iscan_buf_t *
+dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
+{
+	iscan_buf_t *iscanbuf_alloc = 0;
+	iscan_buf_t *iscanbuf_head;
+
+	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+	dhd_iscan_lock();
+
+	iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
+	if (iscanbuf_alloc == NULL)
+		goto fail;
+
+	iscanbuf_alloc->next = NULL;
+	iscanbuf_head = *iscanbuf;
+
+	DHD_ISCAN(("%s: addr of allocated node = 0x%X"
+		   "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
+		   __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
+
+	if (iscanbuf_head == NULL) {
+		*iscanbuf = iscanbuf_alloc;
+		DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
+		goto fail;
+	}
+
+	while (iscanbuf_head->next)
+		iscanbuf_head = iscanbuf_head->next;
+
+	iscanbuf_head->next = iscanbuf_alloc;
+
+fail:
+	dhd_iscan_unlock();
+	return iscanbuf_alloc;
+}
+
+void
+dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
+{
+	iscan_buf_t *iscanbuf_free = 0;
+	iscan_buf_t *iscanbuf_prv = 0;
+	iscan_buf_t *iscanbuf_cur;
+	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+
+	dhd_iscan_lock();
+
+	iscanbuf_cur = iscan_chain;
+
+	/* If iscan_delete is null then delete the entire
+	 * chain or else delete specific one provided
+	 */
+	if (!iscan_delete) {
+		while (iscanbuf_cur) {
+			iscanbuf_free = iscanbuf_cur;
+			iscanbuf_cur = iscanbuf_cur->next;
+			iscanbuf_free->next = 0;
+			MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
+		}
+		iscan_chain = 0;
+	} else {
+		while (iscanbuf_cur) {
+			if (iscanbuf_cur == iscan_delete)
+				break;
+			iscanbuf_prv = iscanbuf_cur;
+			iscanbuf_cur = iscanbuf_cur->next;
+		}
+		if (iscanbuf_prv)
+			iscanbuf_prv->next = iscan_delete->next;
+
+		iscan_delete->next = 0;
+		MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
+
+		if (!iscanbuf_prv)
+			iscan_chain = 0;
+	}
+	dhd_iscan_unlock();
+}
+
+iscan_buf_t *
+dhd_iscan_result_buf(void)
+{
+	return iscan_chain;
+}
+
+int
+dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
+{
+	int rc = -1;
+	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+	char *buf = NULL;
+	char iovar[] = "iscan";
+	uint32 allocSize = 0;
+	wl_ioctl_t ioctl;
+
+	if (pParams) {
+		allocSize = (size + strlen(iovar) + 1);
+		if ((allocSize < size) || (allocSize < strlen(iovar)))
+		{
+			DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
+				__FUNCTION__, allocSize, size, strlen(iovar)));
+			goto cleanUp;
+		}
+		buf = MALLOC(dhd->osh, allocSize);
+
+		if (buf == NULL)
+			{
+			DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
+			goto cleanUp;
+			}
+		ioctl.cmd = WLC_SET_VAR;
+		bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
+		rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, allocSize);
+	}
+
+cleanUp:
+	if (buf) {
+		MFREE(dhd->osh, buf, allocSize);
+	}
+
+	return rc;
+}
+
+static int
+dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
+{
+	wl_iscan_results_t *list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	iscan_buf_t *iscan_cur;
+	int status = -1;
+	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+	int rc;
+	wl_ioctl_t ioctl;
+
+	DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
+
+	iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
+	if (!iscan_cur) {
+		DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
+		dhd_iscan_free_buf(dhdp, 0);
+		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
+		dhd_ind_scan_confirm(dhdp, FALSE);
+		goto fail;
+	}
+
+	dhd_iscan_lock();
+
+	memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
+		iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+	ioctl.cmd = WLC_GET_VAR;
+	ioctl.set = FALSE;
+	rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
+
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	*scan_count = results->count = dtoh32(results->count);
+	status = dtoh32(list_buf->status);
+	DHD_ISCAN(("%s: Got %d resuls\n", __FUNCTION__, results->count));
+
+	dhd_iscan_unlock();
+
+	if (!(*scan_count)) {
+		 /* TODO: race condition when FLUSH already called */
+		dhd_iscan_free_buf(dhdp, 0);
+	}
+fail:
+	return status;
+}
+
+#endif /* SIMPLE_ISCAN */
+
+/* Function to estimate possible DTIM_SKIP value */
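+/* The returned skip count defaults to 3 (or dhd->dtim_skip when set) and is
+ * reduced so that skip * (AP DTIM period) stays within the listen interval;
+ * e.g. with an AP DTIM period of 4 and a listen interval of 10 beacons,
+ * 3 * 4 > 10 so the value returned is 10 / 4 = 2.
+ */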
+int
+dhd_get_dtim_skip(dhd_pub_t *dhd)
+{
+	int bcn_li_dtim;
+	int ret;
+	uint8 bssid[6];
+	int dtim_assoc = 0;
+
+	if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
+		bcn_li_dtim = 3;
+	else
+		bcn_li_dtim = dhd->dtim_skip;
+
+	/* Check if associated */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID,
+		(char *)&bssid, ETHER_ADDR_LEN, FALSE, 0)) == BCME_NOTASSOCIATED) {
+		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* if assoc grab ap's dtim value */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+		&dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+		__FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL));
+
+	/* if not associated just exit */
+	if (dtim_assoc == 0) {
+		goto exit;
+	}
+
+	/* check if sta listen interval fits into AP dtim */
+	if (dtim_assoc > LISTEN_INTERVAL) {
+		/* AP DTIM too big for our listen interval: no DTIM skipping */
+		bcn_li_dtim = 1;
+		DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+			__FUNCTION__, dtim_assoc, LISTEN_INTERVAL));
+		goto exit;
+	}
+
+	if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) {
+		/* Cap dtim_skip so that bcn_li_dtim * DTIM fits the STA's listen interval */
+		bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc);
+		DHD_TRACE(("%s adjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+	}
+
+exit:
+	return bcn_li_dtim;
+}
+
+
+#ifdef PNO_SUPPORT
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+	char iovbuf[128];
+	int pfn_enabled = 0;
+	int iov_len = 0;
+	int ret;
+
+	/* Disable pfn */
+	iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) >= 0) {
+		/* clear pfn */
+		iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
+		if (iov_len) {
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			                            iov_len, TRUE, 0)) < 0) {
+				DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+			}
+		}
+		else {
+			ret = -1;
+			DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
+		}
+	}
+	else
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+
+	return ret;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+	char iovbuf[128];
+	uint8 bssid[6];
+	int ret = -1;
+
+	if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) {
+		DHD_ERROR(("%s error exit\n", __FUNCTION__));
+		return ret;
+	}
+
+	memset(iovbuf, 0, sizeof(iovbuf));
+	/* Check if disassoc to enable pno */
+	if ((pfn_enabled) &&
+		((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID,
+		(char *)&bssid, ETHER_ADDR_LEN, TRUE, 0)) == BCME_NOTASSOCIATED)) {
+			DHD_TRACE(("%s pno enable called in disassoc mode\n", __FUNCTION__));
+	}
+	else if (pfn_enabled) {
+			DHD_ERROR(("%s pno enable called in assoc mode ret=%d\n",
+				__FUNCTION__, ret));
+			return ret;
+	}
+	/* Enable/disable PNO */
+	if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		                            sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		else {
+			dhd->pno_enable = pfn_enabled;
+			DHD_TRACE(("%s set pno as %d\n", __FUNCTION__, dhd->pno_enable));
+		}
+	}
+	else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+
+	return ret;
+}
+
+/* Function to execute combined scan */
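+/* Existing PFN state is cleared first, "pfn_set" then programs the scan
+ * frequency and adaptive-scan parameters, and each SSID is added with its own
+ * "pfn_add" iovar; PNO itself is switched on separately via dhd_pno_enable().
+ */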
+int
+dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr,
+	int pno_repeat, int pno_freq_expo_max)
+{
+	int err = -1;
+	char iovbuf[128];
+	int k, i;
+	wl_pfn_param_t pfn_param;
+	wl_pfn_t	pfn_element;
+	uint len = 0;
+
+	DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr));
+
+	if ((!dhd) && (!ssids_local)) {
+		DHD_ERROR(("%s error exit\n", __FUNCTION__));
+		err = -1;
+	}
+
+	/* Check for broadcast ssid */
+	for (k = 0; k < nssid; k++) {
+		if (!ssids_local[k].SSID_len) {
+			DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k));
+			return err;
+		}
+	}
+/* #define  PNO_DUMP 1 */
+#ifdef PNO_DUMP
+	{
+		int j;
+		for (j = 0; j < nssid; j++) {
+			DHD_ERROR(("%d: scan  for  %s size =%d\n", j,
+				ssids_local[j].SSID, ssids_local[j].SSID_len));
+		}
+	}
+#endif /* PNO_DUMP */
+
+	/* clean up everything */
+	if  ((err = dhd_pno_clean(dhd)) < 0) {
+		DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err));
+		return err;
+	}
+	memset(&pfn_param, 0, sizeof(pfn_param));
+	memset(&pfn_element, 0, sizeof(pfn_element));
+
+	/* set pfn parameters */
+	pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT));
+
+	/* check and set extra pno params */
+	if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) {
+		pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+		pfn_param.repeat = htod32(pno_repeat);
+		pfn_param.exp = htod32(pno_freq_expo_max);
+	}
+	/* set up pno scan fr */
+	if (scan_fr  != 0)
+		pfn_param.scan_freq = htod32(scan_fr);
+
+	if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) {
+		DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
+		return err;
+	}
+	if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) {
+		DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+		return err;
+	}
+	len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0);
+
+	/* set all pfn ssid */
+	for (i = 0; i < nssid; i++) {
+
+		pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+		pfn_element.auth = (DOT11_OPEN_SYSTEM);
+		pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
+		pfn_element.wsec = htod32(0);
+		pfn_element.infra = htod32(1);
+
+		memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len);
+		pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
+
+		if ((len =
+		bcm_mkiovar("pfn_add", (char *)&pfn_element,
+			sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+			if ((err =
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+				DHD_ERROR(("%s failed for i=%d error=%d\n",
+					__FUNCTION__, i, err));
+				return err;
+			}
+			else
+				DHD_ERROR(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n",
+					__FUNCTION__, pfn_param.scan_freq,
+					pfn_param.repeat, pfn_param.exp));
+		}
+		else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err));
+	}
+
+	/* Enable PNO */
+	/* dhd_pno_enable(dhd, 1); */
+	return err;
+}
+
+int
+dhd_pno_get_status(dhd_pub_t *dhd)
+{
+	int ret = -1;
+
+	if (!dhd)
+		return ret;
+	else
+		return (dhd->pno_enable);
+}
+
+#endif /* PNO_SUPPORT */
+
+#if defined(KEEP_ALIVE)
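+/* Builds the "keep_alive" iovar buffer: the iovar name, a wl_keep_alive_pkt_t
+ * header whose period_msec is KEEP_ALIVE_PERIOD when enabling and 0 when
+ * disabling, followed by the NULL_PKT_STR payload.
+ */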
+int dhd_keep_alive_onoff(dhd_pub_t *dhd, int ka_on)
+{
+	char buf[256];
+	char *buf_ptr = buf;
+	wl_keep_alive_pkt_t keep_alive_pkt;
+	char * str;
+	int str_len, buf_len;
+	int res = -1;
+	int keep_alive_period = KEEP_ALIVE_PERIOD; /* in ms */
+
+	DHD_TRACE(("%s: param=%d\n", __FUNCTION__, ka_on));
+
+	if (ka_on) { /* on suspend */
+		keep_alive_pkt.period_msec = keep_alive_period;
+
+	} else {
+		/* on resume, turn off keep_alive packets  */
+		keep_alive_pkt.period_msec = 0;
+	}
+
+	/* IOC var name  */
+	str = "keep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[str_len] = '\0';
+	buf_len = str_len + 1;
+
+	/* set ptr to IOCTL payload after the var name */
+	buf_ptr += buf_len; /* skip the name and its terminating '\0' */
+
+	/* copy Keep-alive attributes from local var keep_alive_pkt */
+	str = NULL_PKT_STR;
+	keep_alive_pkt.len_bytes = strlen(str);
+
+	memcpy(buf_ptr, &keep_alive_pkt, WL_KEEP_ALIVE_FIXED_LEN);
+	buf_ptr += WL_KEEP_ALIVE_FIXED_LEN;
+
+	/* copy packet data */
+	memcpy(buf_ptr, str, keep_alive_pkt.len_bytes);
+	buf_len += (WL_KEEP_ALIVE_FIXED_LEN + keep_alive_pkt.len_bytes);
+/*
+	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+*/
+	return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+/* Android ComboSCAN support */
+
+/*
+ *  data parsing from ComboScan tlv list
+*/
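+/* Each element is a one-byte type (token) followed by input_size bytes of value;
+ * 2- and 4-byte values are passed through htod16/htod32 before being stored in dst.
+ */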
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+                     int input_size, int *bytes_left)
+{
+	char* str = *list_str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* Clean all dest bytes */
+	memset(dst, 0, dst_size);
+	while (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ *  channel list parsing from cscan tlv list
+*/
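+/* Each entry is a CSCAN_TLV_TYPE_CHANNEL_IE byte followed by one channel byte,
+ * where 0 means "all channels".  Parsing stops at the first other IE type and
+ * the number of channels parsed is returned.
+ */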
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+                             int channel_num, int *bytes_left)
+{
+	char* str = *list_str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+		return -1;
+	}
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+			*list_str = str;
+			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* All channels */
+			channel_list[idx] = 0x0;
+		}
+		else {
+			channel_list[idx] = (uint16)str[0];
+			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
+		}
+		*bytes_left -= 1;
+		str += 1;
+
+		if (idx++ > 255) {
+			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/*
+ *  SSIDs list parsing from cscan tlv list
+ */
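+/* Each entry is a CSCAN_TLV_TYPE_SSID_IE byte followed by a length byte and that
+ * many SSID bytes; a zero length denotes a broadcast scan.  Parsing stops at the
+ * first other IE type and the number of SSIDs parsed is returned.
+ */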
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+	char* str =  *list_str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+		return -1;
+	}
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
+		}
+		else if (str[0] <= DOT11_MAX_SSID_LEN) {
+			/* Get proper SSID size */
+			ssid[idx].SSID_len = str[0];
+			*bytes_left -= 1;
+			str += 1;
+
+			/* Get SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+				__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return -1;
+			}
+
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		}
+		else {
+			DHD_ERROR(("### SSID size more that %d\n", str[0]));
+			return -1;
+		}
+
+		if (idx++ >  max) {
+			DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/* Parse a comma-separated list from list_str into ssid array, starting
+ * at index idx.  Max specifies size of the ssid array.  Parses ssids
+ * and returns updated idx; if idx >= max not all fit, the excess have
+ * not been copied.  Returns -1 on empty string, or on ssid too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+	char* str, *ptr;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	for (str = *list_str; str != NULL; str = ptr) {
+
+		/* check for next TAG */
+		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+			*list_str	 = str + strlen(GET_CHANNEL);
+			return idx;
+		}
+
+		if ((ptr = strchr(str, ',')) != NULL) {
+			*ptr++ = '\0';
+		}
+
+		if (strlen(str) > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (strlen(str) == 0)
+			ssid[idx].SSID_len = 0;
+
+		if (idx < max) {
+			bcm_strcpy_s((char*)ssid[idx].SSID, sizeof(ssid[idx].SSID), str);
+			ssid[idx].SSID_len = strlen(str);
+		}
+		idx++;
+	}
+	return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+	int num;
+	int val;
+	char* str;
+	char* endptr = NULL;
+
+	if ((list_str == NULL)||(*list_str == NULL))
+		return -1;
+
+	str = *list_str;
+	num = 0;
+	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+		val = (int)strtoul(str, &endptr, 0);
+		if (endptr == str) {
+			printf("could not parse channel number starting at"
+				" substring \"%s\" in list:\n%s\n",
+				str, *list_str);
+			return -1;
+		}
+		str = endptr + strspn(endptr, " ,");
+
+		if (num == channel_num) {
+			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+				channel_num, *list_str));
+			return -1;
+		}
+
+		channel_list[num++] = (uint16)val;
+	}
+	*list_str = str;
+	return num;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
new file mode 100644
index 0000000..9750eeb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -0,0 +1,293 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2011, Broadcom Corporation
+* 
+*         Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c,v 1.2.42.1 2010-10-19 00:41:09 Exp $
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#ifdef CUSTOMER_HW
+extern  void bcm_wlan_power_off(int);
+extern  void bcm_wlan_power_on(int);
+#endif /* CUSTOMER_HW */
+#if defined(CUSTOMER_HW2)
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#else
+int wifi_set_power(int on, unsigned long msec) { return -1; }
+int wifi_get_irq_number(unsigned long *irq_flags_ptr) { return -1; }
+int wifi_get_mac_addr(unsigned char *buf) { return -1; }
+void *wifi_get_country_code(char *ccode) { return NULL; }
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+#endif /* CUSTOMER_HW2 */
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC)  */
+
+#ifdef CUSTOMER_HW3
+#include <mach/gpio.h>
+#endif
+
+/* Customer-specific host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function returns:
+ *  1) return value  : host GPIO interrupt number for the customer platform
+ *  2) irq_flags_ptr : type of host interrupt (level- or edge-triggered)
+ *
+ *  NOTE:
+ *  Customers should check their platform definitions and their host
+ *  interrupt specification to figure out the proper setting for their
+ *  platform.  Broadcom provides the settings here only as a reference
+ *  example; a hypothetical platform sketch follows the OOB block below.
+ */
+int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
+{
+	int  host_oob_irq = 0;
+
+#ifdef CUSTOMER_HW2
+	host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+	if (dhd_oob_gpio_num < 0) {
+		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+	}
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+	if (dhd_oob_gpio_num < 0) {
+		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+			__FUNCTION__));
+		return (dhd_oob_gpio_num);
+	}
+
+	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+	         __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW
+	host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
+#elif defined CUSTOMER_HW3
+	gpio_request(dhd_oob_gpio_num, "oob irq");
+	host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+	gpio_direction_input(dhd_oob_gpio_num);
+#endif /* CUSTOMER_HW */
+#endif /* CUSTOMER_HW2 */
+
+	return (host_oob_irq);
+}
+#endif /* defined(OOB_INTR_ONLY) */
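+
+/* Hedged platform sketch (an assumption, not part of the reference code): a
+ * board configuration could pick the OOB host GPIO at build time, e.g.
+ *
+ *	#define CUSTOM_OOB_GPIO_NUM	129	(hypothetical host GPIO)
+ *
+ * With CUSTOMER_HW3, dhd_customer_oob_irq_map() above would then request that
+ * GPIO, configure it as an input and return gpio_to_irq(129); on CUSTOMER_HW2
+ * platforms the IRQ number and flags come from wifi_get_irq_number() instead.
+ */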
+
+/* Customer function to control hw specific wlan gpios */
+void
+dhd_customer_gpio_wlan_ctrl(int onoff)
+{
+	switch (onoff) {
+		case WLAN_RESET_OFF:
+			WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_off(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+			wifi_set_power(0, 0);
+#endif
+			WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+		break;
+
+		case WLAN_RESET_ON:
+			WL_TRACE(("%s: call customer specific GPIO to remove WLAN RESET\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_on(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+			wifi_set_power(1, 0);
+#endif
+			WL_ERROR(("=========== WLAN coming back to life ========\n"));
+		break;
+
+		case WLAN_POWER_OFF:
+			WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_off(1);
+#endif /* CUSTOMER_HW */
+		break;
+
+		case WLAN_POWER_ON:
+			WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+				__FUNCTION__));
+#ifdef CUSTOMER_HW
+			bcm_wlan_power_on(1);
+			/* Allow the customer power supply to stabilize */
+			OSL_DELAY(200);
+#endif /* CUSTOMER_HW */
+		break;
+	}
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(unsigned char *buf)
+{
+	int ret = 0;
+
+	WL_TRACE(("%s Enter\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+
+	/* Customer access to MAC address stored outside of DHD driver */
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+	ret = wifi_get_mac_addr(buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+	/* EXAMPLE code */
+	{
+		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+	}
+#endif /* EXAMPLE_GET_MAC */
+
+	return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+	{"",   "XY", 4},  /* Universal if Country code is unknown or empty */
+	{"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+	{"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+	{"EU", "EU", 5},  /* European union countries to : EU regrev 05 */
+	{"AT", "EU", 5},
+	{"BE", "EU", 5},
+	{"BG", "EU", 5},
+	{"CY", "EU", 5},
+	{"CZ", "EU", 5},
+	{"DK", "EU", 5},
+	{"EE", "EU", 5},
+	{"FI", "EU", 5},
+	{"FR", "EU", 5},
+	{"DE", "EU", 5},
+	{"GR", "EU", 5},
+	{"HU", "EU", 5},
+	{"IE", "EU", 5},
+	{"IT", "EU", 5},
+	{"LV", "EU", 5},
+	{"LI", "EU", 5},
+	{"LT", "EU", 5},
+	{"LU", "EU", 5},
+	{"MT", "EU", 5},
+	{"NL", "EU", 5},
+	{"PL", "EU", 5},
+	{"PT", "EU", 5},
+	{"RO", "EU", 5},
+	{"SK", "EU", 5},
+	{"SI", "EU", 5},
+	{"ES", "EU", 5},
+	{"SE", "EU", 5},
+	{"GB", "EU", 5},
+	{"KR", "XY", 3},
+	{"AU", "XY", 3},
+	{"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+	{"TW", "XY", 3},
+	{"AR", "XY", 3},
+	{"MX", "XY", 3},
+	{"IL", "IL", 0},
+	{"CH", "CH", 0},
+	{"TR", "TR", 0},
+	{"NO", "NO", 0},
+#endif /* EXAMPLE_TABLE */
+};
+
+
+/* Customized locale converter
+ *  input : ISO 3166-1 country abbreviation
+ *  output: customized cspec
+ *  (a usage sketch follows the function body below)
+ */
+void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
+{
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+	struct cntry_locales_custom *cloc_ptr;
+
+	if (!cspec)
+		return;
+
+	cloc_ptr = wifi_get_country_code(country_iso_code);
+	if (cloc_ptr) {
+		strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+		cspec->rev = cloc_ptr->custom_locale_rev;
+	}
+	return;
+#else
+	int size, i;
+
+	size = ARRAYSIZE(translate_custom_table);
+
+	if (cspec == 0)
+		 return;
+
+	if (size == 0)
+		 return;
+
+	for (i = 0; i < size; i++) {
+		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+			memcpy(cspec->ccode,
+				translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+			cspec->rev = translate_custom_table[i].custom_locale_rev;
+			return;
+		}
+	}
+#ifdef EXAMPLE_TABLE
+	/* if no country code matched return first universal code from translate_custom_table */
+	memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+	cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXAMPLE_TABLE */
+	return;
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+}
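+
+/* Hedged usage sketch (illustrative only): translate an ISO country code
+ * before pushing the country setting down to the dongle.
+ *
+ *	wl_country_t cspec;
+ *	char iso[WLC_CNTRY_BUF_SZ] = "DE";
+ *	memset(&cspec, 0, sizeof(cspec));
+ *	get_customized_country_code(iso, &cspec);
+ *	(with EXAMPLE_TABLE enabled this yields ccode "EU", rev 5)
+ */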
diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h
new file mode 100644
index 0000000..10851c4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -0,0 +1,104 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h,v 1.9.6.1 2010-12-22 23:47:24 Exp $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#if defined(DHD_DEBUG)
+
+#define DHD_ERROR(args)	       do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \
+								printf args;} while (0)
+#define DHD_TRACE(args)		do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args)		do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args)		do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args)		do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args)		do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args)		do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args)		do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args)		do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args)		do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args)		do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args)		do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args)		do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args)		do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+
+#define DHD_ERROR_ON()		(dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON()		(dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON()		(dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON()		(dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON()		(dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON()		(dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON()		(dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON()		(dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON()		(dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON()		(dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON()		(dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON()		(dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON()		(dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON()		(dhd_msg_level & DHD_ARPOE_VAL)
+
+#else /* DHD_DEBUG */
+
+#define DHD_ERROR(args)    	do {if (net_ratelimit()) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+
+#define DHD_ERROR_ON()		0
+#define DHD_TRACE_ON()		0
+#define DHD_INFO_ON()		0
+#define DHD_DATA_ON()		0
+#define DHD_CTL_ON()		0
+#define DHD_TIMER_ON()		0
+#define DHD_HDRS_ON()		0
+#define DHD_BYTES_ON()		0
+#define DHD_INTR_ON()		0
+#define DHD_GLOM_ON()		0
+#define DHD_EVENT_ON()		0
+#define DHD_BTA_ON()		0
+#define DHD_ISCAN_ON()		0
+#define DHD_ARPOE_ON()		0
+#endif /* DHD_DEBUG */
+
+#define DHD_LOG(args)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
new file mode 100644
index 0000000..e952ee1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -0,0 +1,4575 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c,v 1.131.2.55 2011-02-09 05:31:56 Exp $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200    /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
+#define TSMAX  1000        /* max no. of timing records kept */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+	uint32 bin[NUMBIN];
+} histo_t;
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+#endif
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+
+#ifdef ARP_OFFLOAD_SUPPORT
+static int dhd_device_event(struct notifier_block *this,
+	unsigned long event,
+	void *ptr);
+
+static struct notifier_block dhd_notifier = {
+	.notifier_call = dhd_device_event
+};
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#include <dhd_bus.h>
+
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+	return "";
+}
+#endif	/* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(CONFIG_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t  g_wl_iw_params;
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+/* Interface control information */
+typedef struct dhd_if {
+	struct dhd_info *info;			/* back pointer to dhd_info */
+	/* OS/stack specifics */
+	struct net_device *net;
+	struct net_device_stats stats;
+	int 			idx;			/* iface idx in dongle */
+	int 			state;			/* interface state */
+	uint 			subunit;		/* subunit */
+	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	bool			attached;		/* Delayed attachment when unset */
+	bool			txflowcontrol;	/* Per interface flow control indicator */
+	char			name[IFNAMSIZ+1]; /* linux interface name */
+	uint8			bssidx;			/* bsscfg index for the interface */
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+	uint32 low;
+	uint32 high;
+} tsf_t;
+
+typedef struct {
+	uint32 last_cycle;
+	uint32 last_sec;
+	uint32 last_tsf;
+	uint32 coef;     /* scaling factor */
+	uint32 coefdec1; /* first decimal  */
+	uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+	uint32 t1;
+	uint32 t2;
+	uint32 t3;
+	uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif  /* WLMEDIA_HTSF */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(CONFIG_WIRELESS_EXT)
+	wl_iw_t		iw;		/* wireless extensions state (must be first) */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	dhd_pub_t pub;
+
+	/* For supporting multiple interfaces */
+	dhd_if_t *iflist[DHD_MAX_IFS];
+
+	struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+	spinlock_t	wlfc_spinlock;
+#endif
+#ifdef WLMEDIA_HTSF
+	htsf_t  htsf;
+#endif
+	wait_queue_head_t ioctl_resp_wait;
+	struct timer_list timer;
+	bool wd_timer_valid;
+	struct tasklet_struct tasklet;
+	spinlock_t	sdlock;
+	spinlock_t	txqlock;
+	spinlock_t	dhd_lock;
+#ifdef DHDTHREAD
+	/* Thread based operation */
+	bool threads_only;
+	struct semaphore sdsem;
+
+	tsk_ctl_t	thr_dpc_ctl;
+	tsk_ctl_t	thr_wdt_ctl;
+
+#else
+	bool dhd_tasklet_create;
+#endif /* DHDTHREAD */
+	tsk_ctl_t	thr_sysioc_ctl;
+
+	/* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	struct wake_lock wl_wifi;   /* Wifi wakelock */
+	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	/* net_device interface lock, prevent race conditions among net_dev interface
+	 * calls and wifi_on or wifi_off
+	 */
+	struct mutex dhd_net_if_mutex;
+#endif
+	spinlock_t wakelock_spinlock;
+	int wakelock_counter;
+	int wakelock_timeout_enable;
+
+	int hang_was_sent;
+
+	/* Thread to issue ioctl for multicast */
+	bool set_multicast;
+	bool set_macaddress;
+	struct ether_addr macvalue;
+	wait_queue_head_t ctrl_wait;
+	atomic_t pend_8021x_cnt;
+	dhd_attach_states_t dhd_state;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+} dhd_info_t;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+extern int wl_control_wl_start(struct net_device *dev);
+extern int net_os_send_hang_message(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+struct semaphore dhd_registration_sem;
+#define DHD_REGISTRATION_TIMEOUT  12000  /* msec : allowed time to finish dhd registration */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+/* Spawn a thread for system ioctls (set mac, set mcast) */
+uint dhd_sysioc = TRUE;
+module_param(dhd_sysioc, uint, 0);
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
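+
+/* Hedged usage sketch (module name and paths are hypothetical): both values
+ * can be supplied at module load time, e.g.
+ *
+ *	insmod bcmdhd.ko firmware_path=/system/etc/fw_bcmdhd.bin \
+ *		nvram_path=/system/etc/nvram.txt
+ */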
+
+/* Watchdog interval */
+uint dhd_watchdog_ms = 10;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0);
+#endif /* defined(DHD_DEBUG) */
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+uint dhd_arp_mode = 0xb;
+module_param(dhd_arp_mode, uint, 0);
+
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+
+/*  Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 1);
+
+#ifdef DHDTHREAD
+/* Watchdog thread priority, -1 to use kernel timer */
+int dhd_watchdog_prio = 97;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+int dhd_dpc_prio = 98;
+module_param(dhd_dpc_prio, int, 0);
+
+/* Dongle memory size */
+extern int dhd_dongle_memsize;
+module_param(dhd_dongle_memsize, int, 0);
+#endif /* DHDTHREAD */
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ];
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE()	(!in_atomic())
+#else
+#define BLOCKABLE()	(!in_interrupt())
+#endif
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMDBGFS
+extern void dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif /* BCMDBGFS */
+
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE        "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#else
+#define DHD_COMPILED
+#endif /* DHD_DEBUG */
+
+static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+#ifdef DHD_DEBUG
+"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
+#endif
+;
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+                             wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) && 1
+static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+	int ret = NOTIFY_DONE;
+
+	switch (action)	{
+		case PM_HIBERNATION_PREPARE:
+		case PM_SUSPEND_PREPARE:
+			dhd_mmc_suspend = TRUE;
+		ret = NOTIFY_OK;
+		break;
+		case PM_POST_HIBERNATION:
+		case PM_POST_SUSPEND:
+			dhd_mmc_suspend = FALSE;
+		ret = NOTIFY_OK;
+		break;
+	}
+	smp_mb();
+	return ret;
+}
+
+static struct notifier_block dhd_sleep_pm_notifier = {
+	.notifier_call = dhd_sleep_pm_callback,
+	.priority = 0
+};
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+	/* && defined(DHD_GPL) */
+static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	DHD_TRACE(("%s: %d\n", __FUNCTION__, value));
+	/* 1 - Enable packet filter, only allow unicast packet to send up */
+	/* 0 - Disable packet filter */
+	if (dhd_pkt_filter_enable) {
+		int i;
+
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+				value, dhd_master_mode);
+		}
+	}
+#endif
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+	int power_mode = PM_MAX;
+	/* wl_pkt_filter_enable_t	enable_parm; */
+	char iovbuf[32];
+	int bcn_li_dtim = 3;
+	uint roamvar = 1;
+
+	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+		__FUNCTION__, value, dhd->in_suspend));
+
+	if (dhd && dhd->up) {
+		if (value && dhd->in_suspend) {
+
+				/* Kernel suspended */
+				DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+
+				/* Enable packet filter, only allow unicast packet to send up */
+				dhd_set_packet_filter(1, dhd);
+
+				/* If DTIM skip is set up as default, force it to wake
+				 * on every third DTIM for better power savings.  Note
+				 * that one side effect is a chance of missing BC/MC
+				 * packets.
+				 */
+				bcn_li_dtim = dhd_get_dtim_skip(dhd);
+				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+					4, iovbuf, sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+				/* Disable firmware roaming during suspend */
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4,
+					iovbuf, sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			} else {
+
+				/* Kernel resumed  */
+				DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+				power_mode = PM_FAST;
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+
+				/* disable pkt filter */
+				dhd_set_packet_filter(0, dhd);
+
+				/* restore pre-suspend setting for dtim_skip */
+				bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
+					4, iovbuf, sizeof(iovbuf));
+
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+				roamvar = dhd_roam_disable;
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
+					sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			}
+	}
+
+	return 0;
+}
+
+static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
+{
+	dhd_pub_t *dhdp = &dhd->pub;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	/* Set flag when early suspend was called */
+	dhdp->in_suspend = val;
+	if (!dhdp->suspend_disable_flag)
+		dhd_set_suspend(val, dhdp);
+	DHD_OS_WAKE_UNLOCK(dhdp);
+}
+
+static void dhd_early_suspend(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 1);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 0);
+}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+/*
+ * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
+ *
+ *      dhd_timeout_start(&tmo, usec);
+ *      while (!dhd_timeout_expired(&tmo))
+ *              if (poll_something())
+ *                      break;
+ *      if (dhd_timeout_expired(&tmo))
+ *              fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+	tmo->limit = usec;
+	tmo->increment = 0;
+	tmo->elapsed = 0;
+	tmo->tick = 1000000 / HZ;
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Does nothing on the first call */
+	if (tmo->increment == 0) {
+		tmo->increment = 1;
+		return 0;
+	}
+
+	if (tmo->elapsed >= tmo->limit)
+		return 1;
+
+	/* Add the delay that's about to take place */
+	tmo->elapsed += tmo->increment;
+
+	if (tmo->increment < tmo->tick) {
+		OSL_DELAY(tmo->increment);
+		tmo->increment *= 2;
+		if (tmo->increment > tmo->tick)
+			tmo->increment = tmo->tick;
+	} else {
+		wait_queue_head_t delay_wait;
+		DECLARE_WAITQUEUE(wait, current);
+		int pending;
+		init_waitqueue_head(&delay_wait);
+		add_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+		pending = signal_pending(current);
+		remove_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		if (pending)
+			return 1;	/* Interrupted */
+	}
+
+	return 0;
+}
+
+static int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+	int i = 0;
+
+	ASSERT(dhd);
+	while (i < DHD_MAX_IFS) {
+		if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
+			return i;
+		i++;
+	}
+
+	return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(struct dhd_pub *dhd_pub, int ifidx)
+{
+	struct dhd_info *dhd_info;
+
+	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+		return NULL;
+	dhd_info = dhd_pub->info;
+	if (dhd_info && dhd_info->iflist[ifidx])
+		return dhd_info->iflist[ifidx]->net;
+	return NULL;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (name == NULL || *name == '\0')
+		return 0;
+
+	while (--i > 0)
+		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+				break;
+
+	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+	return i;	/* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return "<if_bad>";
+	}
+
+	if (dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+		return "<if_null>";
+	}
+
+	if (dhd->iflist[ifidx]->net)
+		return dhd->iflist[ifidx]->net->name;
+
+	return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+	int i;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp;
+
+	ASSERT(dhd);
+	for (i = 0; i < DHD_MAX_IFS; i++)
+	if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+		return dhd->iflist[i]->mac_addr;
+
+	return NULL;
+}
+
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+	struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	struct netdev_hw_addr *ha;
+#else
+	struct dev_mc_list *mclist;
+#endif
+	uint32 allmulti, cnt;
+
+	wl_ioctl_t ioc;
+	char *buf, *bufp;
+	uint buflen;
+	int ret;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	cnt = netdev_mc_count(dev);
+#else
+	cnt = dev->mc_count;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	/* Determine initial value of allmulti flag */
+	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+	/* Send down the multicast list first. */
+
+
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+		           dhd_ifname(&dhd->pub, ifidx), cnt));
+		return;
+	}
+
+	strcpy(bufp, "mcast_list");
+	bufp += strlen("mcast_list") + 1;
+
+	cnt = htol32(cnt);
+	memcpy(bufp, &cnt, sizeof(cnt));
+	bufp += sizeof(cnt);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	netdev_for_each_mc_addr(ha, dev) {
+		if (!cnt)
+			break;
+		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+		cnt--;
+	}
+#else
+	for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) {
+		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+	}
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		allmulti = cnt ? TRUE : allmulti;
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but may have been forced on above if we were trying
+	 * to set some addresses and the dongle rejected it.
+	 */
+
+	buflen = sizeof("allmulti") + sizeof(allmulti);
+	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+		return;
+	}
+	allmulti = htol32(allmulti);
+
+	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+		           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+		MFREE(dhd->pub.osh, buf, buflen);
+		return;
+	}
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set allmulti %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+	allmulti = htol32(allmulti);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_PROMISC;
+	ioc.buf = &allmulti;
+	ioc.len = sizeof(allmulti);
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set promisc %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+}
+
+static int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
+{
+	char buf[32];
+	wl_ioctl_t ioc;
+	int ret;
+
+	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+		return -1;
+	}
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = 32;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+	} else {
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
+#endif
+
+static void
+dhd_op_if(dhd_if_t *ifp)
+{
+	dhd_info_t *dhd;
+	int ret = 0, err = 0;
+#ifdef SOFTAP
+	unsigned long flags;
+#endif
+
+	ASSERT(ifp && ifp->info && ifp->idx);	/* Virtual interfaces only */
+
+	dhd = ifp->info;
+
+	DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_is_progress_ifchange())
+			return;
+
+#endif
+	switch (ifp->state) {
+	case WLC_E_IF_ADD:
+		/*
+		 * Delete the existing interface before overwriting it
+		 * in case we missed the WLC_E_IF_DEL event.
+		 */
+		if (ifp->net != NULL) {
+			DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
+			 __FUNCTION__, ifp->net->name));
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			free_netdev(ifp->net);
+		}
+		/* Allocate etherdev, including space for private structure */
+		if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
+			DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+			ret = -ENOMEM;
+		}
+		if (ret == 0) {
+			strncpy(ifp->net->name, ifp->name, IFNAMSIZ);
+#ifdef WL_CFG80211
+			if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
+				wl_cfg80211_notify_ifadd(ifp->net);
+#endif
+
+			ifp->net->name[IFNAMSIZ - 1] = '\0';
+			memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
+			if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
+				DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
+					__FUNCTION__, err));
+				ret = -EOPNOTSUPP;
+			} else {
+#if defined(SOFTAP)
+		if (ap_cfg_running && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+				 /* semaphore that the soft AP CODE waits on */
+				flags = dhd_os_spin_lock(&dhd->pub);
+
+				/* save ptr to wl0.1 netdev for use in wl_iw.c  */
+				ap_net_dev = ifp->net;
+				 /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
+				up(&ap_eth_ctl.sema);
+				dhd_os_spin_unlock(&dhd->pub, flags);
+		}
+#endif
+				DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
+					current->pid, ifp->net->name));
+				ifp->state = 0;
+			}
+		}
+		break;
+	case WLC_E_IF_DEL:
+		if (ifp->net != NULL) {
+			DHD_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n", __FUNCTION__));
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			ret = DHD_DEL_IF;	/* Make sure the free_netdev() is called */
+		}
+		break;
+	default:
+		DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
+		ASSERT(!ifp->state);
+		break;
+	}
+
+	if (ret < 0) {
+		if (ifp->net) {
+			free_netdev(ifp->net);
+		}
+		dhd->iflist[ifp->idx] = NULL;
+		MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+#ifdef WL_CFG80211
+		if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
+			wl_cfg80211_notify_ifdel(ifp->net);
+#endif
+#ifdef SOFTAP
+		flags = dhd_os_spin_lock(&dhd->pub);
+		if (ifp->net == ap_net_dev)
+			ap_net_dev = NULL;   /*  NULL  SOFTAP global wl0.1 as well */
+		dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /*  SOFTAP */
+	}
+}
+
+static int
+_dhd_sysioc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+
+	int i;
+#ifdef SOFTAP
+	bool in_ap = FALSE;
+	unsigned long flags;
+#endif
+
+	DAEMONIZE("dhd_sysioc");
+
+	complete(&tsk->completed);
+
+	while (down_interruptible(&tsk->sema) == 0) {
+
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk->terminated) {
+			break;
+		}
+
+		dhd_net_if_lock_local(dhd);
+		DHD_OS_WAKE_LOCK(&dhd->pub);
+
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i));
+#ifdef SOFTAP
+				flags = dhd_os_spin_lock(&dhd->pub);
+				in_ap = (ap_net_dev != NULL);
+				dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+				if (dhd->iflist[i]->state)
+					dhd_op_if(dhd->iflist[i]);
+#ifdef SOFTAP
+				if (dhd->iflist[i] == NULL) {
+					DHD_TRACE(("\n\n %s: interface %d has just been removed"
+						"!\n\n", __FUNCTION__, i));
+					continue;
+				}
+
+				if (in_ap && dhd->set_macaddress)  {
+					DHD_TRACE(("attempt to set MAC for %s in AP Mode,"
+						" blocked.\n", dhd->iflist[i]->net->name));
+					dhd->set_macaddress = FALSE;
+					continue;
+				}
+
+				if (in_ap && dhd->set_multicast)  {
+					DHD_TRACE(("attempt to set MULTICAST list for %s"
+					 " in AP Mode, blocked.\n", dhd->iflist[i]->net->name));
+					dhd->set_multicast = FALSE;
+					continue;
+				}
+#endif /* SOFTAP */
+				if (dhd->set_multicast) {
+					dhd->set_multicast = FALSE;
+					_dhd_set_multicast_list(dhd, i);
+				}
+				if (dhd->set_macaddress) {
+					dhd->set_macaddress = FALSE;
+					_dhd_set_mac_address(dhd, i, &dhd->macvalue);
+				}
+			}
+		}
+
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		dhd_net_if_unlock_local(dhd);
+	}
+	DHD_TRACE(("%s: stopped\n", __FUNCTION__));
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return -1;
+
+	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+	memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+	dhd->set_macaddress = TRUE;
+	up(&dhd->thr_sysioc_ctl.sema);
+
+	return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+	dhd->set_multicast = TRUE;
+	up(&dhd->thr_sysioc_ctl.sema);
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+	ASSERT(di != NULL);
+	spin_lock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+	ASSERT(di != NULL);
+	spin_unlock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
+
+#endif /* PROP_TXSTATUS */
+int
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh = NULL;
+
+	/* Reject if down */
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		/* free the packet here since the caller won't */
+		PKTFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+
+	/* Update multicast statistic */
+	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+		eh = (struct ether_header *)pktdata;
+
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			dhdp->tx_multicast++;
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+			atomic_inc(&dhd->pend_8021x_cnt);
+	}
+
+	/* Look into the packet and update the packet priority */
+	if (PKTPRIO(pktbuf) == 0)
+		pktsetprio(pktbuf, FALSE);
+
+#ifdef PROP_TXSTATUS
+	if (dhdp->wlfc_state) {
+		/* store the interface ID */
+		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+		/* store destination MAC in the tag as well */
+		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+		/* decide which FIFO this packet belongs to */
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+		else
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+	} else
+#endif /* PROP_TXSTATUS */
+	/* If the protocol uses a data header, apply it */
+	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+	/* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+	dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+	if (dhdp->wlfc_state && ((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode
+			!= WLFC_FCMODE_NONE) {
+		dhd_os_wlfc_block(dhdp);
+		ret = dhd_wlfc_enque_sendq(dhdp->wlfc_state, DHD_PKTTAG_FIFO(PKTTAG(pktbuf)),
+			pktbuf);
+		dhd_wlfc_commit_packets(dhdp->wlfc_state,  (f_commitpkt_t)dhd_bus_txdata,
+			dhdp->bus);
+		if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) {
+			((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0;
+		}
+		dhd_os_wlfc_unblock(dhdp);
+	}
+	else
+		/* non-proptxstatus way */
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#else
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* PROP_TXSTATUS */
+
+
+	return ret;
+}
+
+static int
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	void *pktbuf;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	int ifidx;
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+	uint8 htsfdlystat_sz = 0;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	/* Reject if down */
+	if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
+		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+		netif_stop_queue(net);
+		/* Send Event when bus down detected during data session */
+		if (dhd->pub.busstate == DHD_BUS_DOWN)  {
+			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+			net_os_send_hang_message(net);
+		}
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+		netif_stop_queue(net);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	/* Make sure there's enough room for any header */
+
+	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+		struct sk_buff *skb2;
+
+		DHD_INFO(("%s: insufficient headroom\n",
+		          dhd_ifname(&dhd->pub, ifidx)));
+		dhd->pub.tx_realloc++;
+
+		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+		dev_kfree_skb(skb);
+		if ((skb = skb2) == NULL) {
+			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+			           dhd_ifname(&dhd->pub, ifidx)));
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+
+	/* Convert to packet */
+	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+		           dhd_ifname(&dhd->pub, ifidx)));
+		dev_kfree_skb_any(skb);
+		ret = -ENOMEM;
+		goto done;
+	}
+#ifdef WLMEDIA_HTSF
+	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+		struct ether_header *eh = (struct ether_header *)pktdata;
+
+		if (!ETHER_ISMULTI(eh->ether_dhost) &&
+			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+		}
+	}
+#endif
+
+	ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+
+done:
+	if (ret)
+		dhd->pub.dstats.tx_dropped++;
+	else
+		dhd->pub.tx_packets++;
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	/* Return ok: we always eat the packet */
+	return 0;
+}
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+	struct net_device *net;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhdp->txoff = state;
+	ASSERT(dhd);
+
+	if (ifidx == ALL_INTERFACES) {
+		/* Flow control on all active interfaces */
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				net = dhd->iflist[i]->net;
+				if (state == ON)
+					netif_stop_queue(net);
+				else
+					netif_wake_queue(net);
+			}
+		}
+	}
+	else {
+		if (dhd->iflist[ifidx]) {
+			net = dhd->iflist[ifidx]->net;
+			if (state == ON)
+				netif_stop_queue(net);
+			else
+				netif_wake_queue(net);
+		}
+	}
+}
+
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void *data, *pnext, *save_pktbuf;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	save_pktbuf = pktbuf;
+
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+		struct ether_header *eh;
+		struct dot11_llc_snap_header *lsh;
+
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+		lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+		if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
+		    (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
+		    bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+		    lsh->type == HTON16(BTA_PROT_L2CAP)) {
+			amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
+			        ((uint8 *)eh + RFC1042_HDR_LEN);
+			ACL_data = NULL;
+		}
+
+#ifdef PROP_TXSTATUS
+		if (dhdp->wlfc_state && PKTLEN(dhdp->osh, pktbuf) == 0) {
+			/* WLFC may send a header-only packet when there is an
+			 * urgent message but no packet to piggy-back on.
+			 */
+			((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++;
+			PKTFREE(dhdp->osh, pktbuf, TRUE);
+			continue;
+		}
+#endif
+
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		/* Get the protocol, maintaining the skb around eth_type_trans().
+		 * The main reason for this hack is a limitation of Linux 2.4,
+		 * where 'eth_type_trans' uses 'net->hard_header_len' instead of
+		 * ETH_HLEN for its internal skb_pull.  To avoid copying packets
+		 * coming from the network stack when adding the BDC and hardware
+		 * headers, we set 'net->hard_header_len' at network interface
+		 * registration to ETH_HLEN plus the extra space required for
+		 * those headers, not just ETH_HLEN.
+		 */
+		eth = skb->data;
+		len = skb->len;
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+		}
+
+		skb->data = eth;
+		skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+	dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb->mac_header,
+#else
+			skb->mac.raw,
+#endif
+			&event,
+			&data);
+
+			wl_event_to_host_order(&event);
+			if (event.event_type == WLC_E_BTA_HCI_EVENT) {
+				dhd_bta_doevt(dhdp, data, event.datalen);
+			}
+		}
+
+
+		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+		if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
+			ifp = dhd->iflist[ifidx];
+
+		if (ifp->net)
+			ifp->net->last_rx = jiffies;
+
+		dhdp->dstats.rx_bytes += skb->len;
+		dhdp->rx_packets++; /* Local count */
+
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			/* If the receive is not processed inside an ISR,
+			 * the softirqd must be woken explicitly to service
+			 * the NET_RX_SOFTIRQ.  In 2.6 kernels, this is handled
+			 * by netif_rx_ni(), but in earlier kernels, we need
+			 * to do it manually.
+			 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+			netif_rx_ni(skb);
+#else
+			ulong flags;
+			netif_rx(skb);
+			local_irq_save(flags);
+			RAISE_RX_SOFTIRQ();
+			local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+		}
+	}
+	DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(dhdp);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+	return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	uint ifidx;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+	uint len;
+
+	dhd_prot_hdrpull(dhdp, &ifidx, txp);
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type  = ntoh16(eh->ether_type);
+
+	if (type == ETHER_TYPE_802_1X)
+		atomic_dec(&dhd->pend_8021x_cnt);
+
+	/* Crack open the packet and check to see if it is BT HCI ACL data packet.
+	 * If yes generate packet completion event.
+	 */
+	len = PKTLEN(dhdp->osh, txp);
+
+	/* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
+	if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
+		struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+		if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+		    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+
+			dhd_bta_tx_hcidata_complete(dhdp, txp, success);
+		}
+	}
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_if_t *ifp;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF)
+		return NULL;
+
+	ifp = dhd->iflist[ifidx];
+	ASSERT(dhd && ifp);
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+
+	/* Copy dongle stats to net device stats */
+	ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
+	ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
+	ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
+	ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
+	ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
+	ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
+	ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
+	ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
+	ifp->stats.multicast = dhd->pub.dstats.multicast;
+
+	return &ifp->stats;
+}
+
+#ifdef DHDTHREAD
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+#ifdef DHD_SCHED
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+#endif /* DHD_SCHED */
+
+	DAEMONIZE("dhd_watchdog");
+
+	/* Run until signal received */
+	complete(&tsk->completed);
+
+	while (1)
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			dhd_os_sdlock(&dhd->pub);
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+
+				/* Call the bus module watchdog */
+				dhd_bus_watchdog(&dhd->pub);
+
+				flags = dhd_os_spin_lock(&dhd->pub);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid)
+					mod_timer(&dhd->timer,
+					jiffies + dhd_watchdog_ms * HZ / 1000);
+				dhd_os_spin_unlock(&dhd->pub, flags);
+			}
+			dhd_os_sdunlock(&dhd->pub);
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		} else {
+			break;
+		}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+static void dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	unsigned long flags;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	if (dhd->pub.dongle_reset) {
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return;
+	}
+
+#ifdef DHDTHREAD
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+#endif /* DHDTHREAD */
+
+	dhd_os_sdlock(&dhd->pub);
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+	flags = dhd_os_spin_lock(&dhd->pub);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	dhd_os_sdunlock(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHDTHREAD
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+#ifdef DHD_SCHED
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+#endif /* DHD_SCHED */
+
+	DAEMONIZE("dhd_dpc");
+	/* DHD_OS_WAKE_LOCK is taken in dhd_sched_dpc() below */
+
+	/*  signal: thread has started */
+	complete(&tsk->completed);
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				if (dhd_bus_dpc(dhd->pub.bus)) {
+					up(&tsk->sema);
+				}
+				else {
+					DHD_OS_WAKE_UNLOCK(&dhd->pub);
+				}
+			} else {
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* This tasklet is scheduled from dhd_sched_dpc() below, where the wake
+	 * lock is taken; the tasklet itself is initialized in dhd_attach().
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+		if (dhd_bus_dpc(dhd->pub.bus))
+			tasklet_schedule(&dhd->tasklet);
+		else
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	}
+}
+
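+/*
+ * Schedule deferred bus processing: take a wake lock, then wake the DPC
+ * thread if one was created, otherwise fall back to the tasklet.
+ */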
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHDTHREAD
+	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+		up(&dhd->thr_dpc_ctl.sema);
+		return;
+	}
+#endif /* DHDTHREAD */
+
+	tasklet_schedule(&dhd->tasklet);
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strcpy(buf, "toe_ol");
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		/* Check for older dongle image that doesn't support toe_ol */
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int toe, ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = TRUE;
+
+	/* Set toe_ol as requested */
+
+	strcpy(buf, "toe_ol");
+	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+
+	toe = (toe_ol != 0);
+
+	strcpy(buf, "toe");
+	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+	sprintf(info->driver, "wl");
+	sprintf(info->version, "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any requested driver name */
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		strncpy(drvname, info.driver, sizeof(info.driver));
+		drvname[sizeof(info.driver)-1] = '\0';
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		if (strcmp(drvname, "?dhd") == 0) {
+			sprintf(info.driver, "dhd");
+			strcpy(info.version, EPI_VERSION_STR);
+		}
+
+		/* otherwise, require dongle to be up */
+		else if (!dhd->pub.up) {
+			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+			return -ENODEV;
+		}
+
+		/* finally, report dongle driver type */
+		else if (dhd->pub.iswl)
+			sprintf(info.driver, "wl");
+		else
+			sprintf(info.driver, "xx");
+
+		sprintf(info.version, "%lu", dhd->pub.drv_version);
+		if (copy_to_user(uaddr, &info, sizeof(info)))
+			return -EFAULT;
+		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+		         (int)sizeof(drvname), drvname, info.driver));
+		break;
+
+#ifdef TOE
+	/* Get toe offload components from dongle */
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GTXCSUM:
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		edata.cmd = cmd;
+		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+		if (copy_to_user(uaddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+
+	/* Set toe offload components in dongle */
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_STXCSUM:
+		if (copy_from_user(&edata, uaddr, sizeof(edata)))
+			return -EFAULT;
+
+		/* Read the current settings, update and write back */
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		if (edata.data != 0)
+			toe_cmpnt |= csum_dir;
+		else
+			toe_cmpnt &= ~csum_dir;
+
+		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+			return ret;
+
+		/* If setting TX checksum mode, tell Linux the new mode */
+		if (cmd == ETHTOOL_STXCSUM) {
+			if (edata.data)
+				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+			else
+				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+
+		break;
+#endif /* TOE */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
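+/*
+ * ndo_do_ioctl handler: dispatch wireless-extension, ethtool and Android
+ * private commands, then handle SIOCDEVPRIVATE requests, which carry either
+ * a local DHD ioctl or a wl ioctl destined for the dongle.
+ */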
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_ioctl_t ioc;
+	int bcmerror = 0;
+	int buflen = 0;
+	void *buf = NULL;
+	uint driver = 0;
+	int ifidx;
+	int ret;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -1;
+	}
+
+#if defined(CONFIG_WIRELESS_EXT)
+	/* linux wireless extensions */
+	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+		/* may recurse, do NOT lock */
+		ret = wl_iw_ioctl(net, ifr, cmd);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+	if (cmd == SIOCETHTOOL) {
+		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(net, ifr, cmd);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+
+	if (cmd != SIOCDEVPRIVATE) {
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -EOPNOTSUPP;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	/* Copy the ioc control structure part of ioctl request */
+	if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+		bcmerror = -BCME_BADADDR;
+		goto done;
+	}
+
+	/* Copy out any buffer passed */
+	if (ioc.buf) {
+		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+		/* optimization for direct ioctl calls from kernel */
+		/*
+		if (segment_eq(get_fs(), KERNEL_DS)) {
+			buf = ioc.buf;
+		} else {
+		*/
+		{
+			if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) {
+				bcmerror = -BCME_NOMEM;
+				goto done;
+			}
+			if (copy_from_user(buf, ioc.buf, buflen)) {
+				bcmerror = -BCME_BADADDR;
+				goto done;
+			}
+		}
+	}
+
+	/* To differentiate between wl and dhd, read 4 more bytes */
+	if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+		sizeof(uint)) != 0)) {
+		bcmerror = -BCME_BADADDR;
+		goto done;
+	}
+
+	if (!capable(CAP_NET_ADMIN)) {
+		bcmerror = -BCME_EPERM;
+		goto done;
+	}
+
+	/* check for local dhd ioctl and handle it */
+	if (driver == DHD_IOCTL_MAGIC) {
+		bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
+		if (bcmerror)
+			dhd->pub.bcmerror = bcmerror;
+		goto done;
+	}
+
+	/* send to dongle (must be up, and wl). */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	if (!dhd->pub.iswl) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	/*
+	 * Flush the TX queue if required for proper message serialization:
+	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+	 * prevent M4 encryption and
+	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+	 * prevent disassoc frame being sent before WPS-DONE frame.
+	 */
+	if (ioc.cmd == WLC_SET_KEY ||
+	    (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL &&
+	     strncmp("wsec_key", ioc.buf, 9) == 0) ||
+	    (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL &&
+	     strncmp("bsscfg:wsec_key", ioc.buf, 15) == 0) ||
+	    ioc.cmd == WLC_DISASSOC)
+		dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+	if (ioc.buf) {
+		/*  short cut wl ioctl calls here  */
+		if (strcmp("htsf", ioc.buf) == 0) {
+			dhd_ioctl_htsf_get(dhd, 0);
+			return BCME_OK;
+		}
+
+		if (strcmp("htsflate", ioc.buf) == 0) {
+			if (ioc.set) {
+				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+				memset(&maxdelayts, 0, sizeof(tstamp_t));
+				maxdelay = 0;
+				tspktcnt = 0;
+				maxdelaypktno = 0;
+				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			} else {
+				dhd_dump_latency();
+			}
+			return BCME_OK;
+		}
+		if (strcmp("htsfclear", ioc.buf) == 0) {
+			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			htsf_seqnum = 0;
+			return BCME_OK;
+		}
+		if (strcmp("htsfhis", ioc.buf) == 0) {
+			dhd_dump_htsfhisto(&vi_d1, "H to D");
+			dhd_dump_htsfhisto(&vi_d2, "D to D");
+			dhd_dump_htsfhisto(&vi_d3, "D to H");
+			dhd_dump_htsfhisto(&vi_d4, "H to H");
+			return BCME_OK;
+		}
+		if (strcmp("tsport", ioc.buf) == 0) {
+			if (ioc.set) {
+				memcpy(&tsport, ioc.buf + 7, 4);
+			} else {
+				DHD_ERROR(("current timestamp port: %d \n", tsport));
+			}
+			return BCME_OK;
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+
+	bcmerror = dhd_wl_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+
+done:
+	if ((bcmerror == -ETIMEDOUT) || ((dhd->pub.busstate == DHD_BUS_DOWN) &&
+		(!dhd->pub.dongle_reset))) {
+		DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__));
+		net_os_send_hang_message(net);
+	}
+
+	if (!bcmerror && buf && ioc.buf) {
+		if (copy_to_user(ioc.buf, buf, buflen))
+			bcmerror = -EFAULT;
+	}
+
+	if (buf)
+		MFREE(dhd->pub.osh, buf, buflen);
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return OSL_ERROR(bcmerror);
+}
+
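+/*
+ * ndo_stop handler: mark the interface down, stop the TX queue and the
+ * protocol module, and (for the primary cfg80211 interface) power the WLAN
+ * device off.
+ */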
+static int
+dhd_stop(struct net_device *net)
+{
+	int ifidx;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	if (dhd->pub.up == 0) {
+		return 0;
+	}
+	ifidx = dhd_net2idx(dhd, net);
+
+#ifdef WL_CFG80211
+	if (ifidx == 0)
+		wl_cfg80211_down();
+#endif
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_cleanup(&dhd->pub);
+#endif
+	/* Set state and stop OS transmissions */
+	dhd->pub.up = 0;
+	netif_stop_queue(net);
+
+	/* Stop the protocol module */
+	dhd_prot_stop(&dhd->pub);
+
+#if defined(WL_CFG80211)
+	if (ifidx == 0)
+		wl_android_wifi_off(net);
+#endif
+
+	OLD_MOD_DEC_USE_COUNT;
+	return 0;
+}
+
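+/*
+ * ndo_open handler: for the primary interface, power the WLAN device on,
+ * bring up the bus if needed, sync the MAC address and TOE settings, then
+ * enable the TX queue.
+ */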
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+#ifdef TOE
+	uint32 toe_ol;
+#endif
+	int ifidx;
+	int32 ret = 0;
+
+#if !defined(WL_CFG80211)
+	/* Force a start if ifconfig up is called before the START command.
+	 * WEXT's wl_control_wl_start() is kept for backward compatibility;
+	 * this should be removed in the future.
+	 */
+	wl_control_wl_start(net);
+#endif
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if (ifidx < 0) {
+		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+		return -1;
+	}
+
+	if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == WLC_E_IF_DEL) {
+		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+		return -1;
+	}
+
+	if (ifidx == 0) {
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+#if defined(WL_CFG80211)
+		wl_android_wifi_on(net);
+#endif
+
+		if (dhd->pub.busstate != DHD_BUS_DATA) {
+			int ret;
+
+			/* try to bring up bus */
+			if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				return -1;
+			}
+
+		}
+
+		/* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+		/* Get current TOE mode from dongle */
+		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+		else
+			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
+		if (unlikely(wl_cfg80211_up())) {
+			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+			return -1;
+		}
+#endif /* WL_CFG80211 */
+	}
+
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	dhd->pub.up = 1;
+
+#ifdef BCMDBGFS
+	dhd_dbg_init(&dhd->pub);
+#endif
+
+	OLD_MOD_INC_USE_COUNT;
+	return ret;
+}
+
+osl_t *
+dhd_osl_attach(void *pdev, uint bustype)
+{
+	return osl_attach(pdev, bustype, TRUE);
+}
+
+void
+dhd_osl_detach(osl_t *osh)
+{
+	if (MALLOCED(osh)) {
+		DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+	}
+	osl_detach(osh);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && 1
+	up(&dhd_registration_sem);
+#endif
+}
+
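+/*
+ * Create (or replace) the interface entry at ifidx.  When no net_device
+ * handle is supplied, the entry is marked WLC_E_IF_ADD and the sysioc thread
+ * is signalled to create and register the device later.
+ */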
+int
+dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
+	uint8 *mac_addr, uint32 flags, uint8 bssidx)
+{
+	dhd_if_t *ifp;
+
+	DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
+
+	ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+
+	ifp = dhd->iflist[ifidx];
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			free_netdev(ifp->net);
+		}
+	} else
+		if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__));
+			return -ENOMEM;
+		}
+
+	memset(ifp, 0, sizeof(dhd_if_t));
+	ifp->info = dhd;
+	dhd->iflist[ifidx] = ifp;
+	strncpy(ifp->name, name, IFNAMSIZ);
+	ifp->name[IFNAMSIZ] = '\0';
+	if (mac_addr != NULL)
+		memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+	if (handle == NULL) {
+		ifp->state = WLC_E_IF_ADD;
+		ifp->idx = ifidx;
+		ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+		up(&dhd->thr_sysioc_ctl.sema);
+	} else
+		ifp->net = (struct net_device *)handle;
+
+	return 0;
+}
+
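+/* Mark the interface for deletion; the actual teardown is done by the sysioc thread. */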
+void
+dhd_del_if(dhd_info_t *dhd, int ifidx)
+{
+	dhd_if_t *ifp;
+
+	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
+	ifp = dhd->iflist[ifidx];
+	if (!ifp) {
+		DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
+		return;
+	}
+
+	ifp->state = WLC_E_IF_DEL;
+	ifp->idx = ifidx;
+	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+	up(&dhd->thr_sysioc_ctl.sema);
+}
+
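+/*
+ * Allocate the net_device and driver state, attach the protocol layer and
+ * cfg80211/wireless-extensions, set up the watchdog timer, the worker threads
+ * (or the DPC tasklet) and the PM/early-suspend/inetaddr notifiers.  On
+ * failure, any partially completed state is torn down again.
+ */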
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+	dhd_info_t *dhd = NULL;
+	struct net_device *net = NULL;
+
+	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+	/* Update the firmware and nvram paths if they were provided as module parameters */
+	if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
+		strcpy(fw_path, firmware_path);
+	if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
+		strcpy(nv_path, nvram_path);
+
+	/* Allocate etherdev, including space for private structure */
+	if (!(net = alloc_etherdev(sizeof(dhd)))) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_NET_ALLOC;
+
+	/* Allocate primary dhd_info */
+	if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
+		DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(dhd, 0, sizeof(dhd_info_t));
+
+#ifdef DHDTHREAD
+	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#else
+	dhd->dhd_tasklet_create = FALSE;
+#endif /* DHDTHREAD */
+	dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID;
+	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+	/*
+	 * Save the dhd_info into the priv
+	 */
+	memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd));
+	dhd->pub.osh = osh;
+
+	/* Link to info module */
+	dhd->pub.info = dhd;
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+
+	/* Set the network interface name if it was provided as a module parameter */
+	if (iface_name[0]) {
+		int len;
+		char ch;
+		strncpy(net->name, iface_name, IFNAMSIZ);
+		net->name[IFNAMSIZ - 1] = 0;
+		len = strlen(net->name);
+		ch = net->name[len - 1];
+		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+			strcat(net->name, "%d");
+	}
+
+	if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
+		goto fail;
+	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+
+	sema_init(&dhd->proto_sem, 1);
+
+#ifdef PROP_TXSTATUS
+	spin_lock_init(&dhd->wlfc_spinlock);
+	dhd->pub.wlfc_enabled = TRUE;
+#endif /* PROP_TXSTATUS */
+
+	/* Initialize other structure content */
+	init_waitqueue_head(&dhd->ioctl_resp_wait);
+	init_waitqueue_head(&dhd->ctrl_wait);
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&dhd->sdlock);
+	spin_lock_init(&dhd->txqlock);
+	spin_lock_init(&dhd->dhd_lock);
+
+	/* Initialize Wakelock stuff */
+	spin_lock_init(&dhd->wakelock_spinlock);
+	dhd->wakelock_counter = 0;
+	dhd->wakelock_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhd->dhd_net_if_mutex);
+#endif
+	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+	/* Attach and link in the protocol */
+	if (dhd_prot_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_prot_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef WL_CFG80211
+	/* Attach and link in the cfg80211 */
+	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+		DHD_ERROR(("wl_cfg80211_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#if defined(CONFIG_WIRELESS_EXT)
+	/* Attach and link in the iw */
+	if (!(dhd_state &  DHD_ATTACH_STATE_CFG80211)) {
+		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+			DHD_ERROR(("wl_iw_attach failed\n"));
+			goto fail;
+		}
+		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+
+	/* Set up the watchdog timer */
+	init_timer(&dhd->timer);
+	dhd->timer.data = (ulong)dhd;
+	dhd->timer.function = dhd_watchdog;
+
+#ifdef DHDTHREAD
+	/* Initialize thread based operation and lock */
+	sema_init(&dhd->sdsem, 1);
+	if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
+		dhd->threads_only = TRUE;
+	}
+	else {
+		dhd->threads_only = FALSE;
+	}
+
+	if (dhd_watchdog_prio >= 0) {
+		/* Initialize watchdog thread */
+		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0);
+	} else {
+		dhd->thr_wdt_ctl.thr_pid = -1;
+	}
+
+	/* Set up the bottom half handler */
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize DPC thread */
+		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0);
+	} else {
+		/*  use tasklet for dpc */
+		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+		dhd->thr_dpc_ctl.thr_pid = -1;
+	}
+#else
+	/* Set up the bottom half handler */
+	tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+	dhd->dhd_tasklet_create = TRUE;
+#endif /* DHDTHREAD */
+
+	if (dhd_sysioc) {
+		PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0);
+	} else {
+		dhd->thr_sysioc_ctl.thr_pid = -1;
+	}
+	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+	/*
+	 * Save the dhd_info into the priv
+	 */
+	memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) && 1
+	register_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /*  (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	register_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	dhd_state |= DHD_ATTACH_STATE_DONE;
+	dhd->dhd_state = dhd_state;
+	return &dhd->pub;
+
+fail:
+	if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) {
+		if (net) free_netdev(net);
+	} else {
+		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+			__FUNCTION__, dhd_state, &dhd->pub));
+		dhd->dhd_state = dhd_state;
+		dhd_detach(&dhd->pub);
+		dhd_free(&dhd->pub);
+	}
+
+
+	return NULL;
+}
+
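+/*
+ * Bring the dongle up: download firmware/nvram if needed, start the watchdog,
+ * initialize the bus and protocol, and (for EMBEDDED_PLATFORM builds) program
+ * the default event mask and packet filters.
+ */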
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+	unsigned long flags;
+#ifdef EMBEDDED_PLATFORM
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+#endif /* EMBEDDED_PLATFORM */
+
+	ASSERT(dhd);
+
+	DHD_TRACE(("%s: \n", __FUNCTION__));
+
+	dhd_os_sdlock(dhdp);
+
+	/* try to download image and nvram to the dongle */
+	if  ((dhd->pub.busstate == DHD_BUS_DOWN) &&
+		(fw_path != NULL) && (fw_path[0] != '\0') &&
+		(nv_path != NULL) && (nv_path[0] != '\0')) {
+		/* wake lock moved to dhdsdio_download_firmware */
+		if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+		                                fw_path, nv_path))) {
+			DHD_ERROR(("%s: dhd_bus_download_firmware failed. firmware = %s nvram = %s\n",
+			           __FUNCTION__, fw_path, nv_path));
+			dhd_os_sdunlock(dhdp);
+			return -1;
+		}
+	}
+	if (dhd->pub.busstate != DHD_BUS_LOAD) {
+		dhd_os_sdunlock(dhdp);
+		return -ENETDOWN;
+	}
+
+	/* Start the watchdog timer */
+	dhd->pub.tickcnt = 0;
+	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+	/* Bring up the bus */
+	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+		dhd_os_sdunlock(dhdp);
+		return ret;
+	}
+#if defined(OOB_INTR_ONLY)
+	/* Host registration for OOB interrupt */
+	if (bcmsdh_register_oob_intr(dhdp)) {
+		/* deactivate timer and wait for the handler to finish */
+		flags = dhd_os_spin_lock(&dhd->pub);
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+
+		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		return -ENODEV;
+	}
+
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* If bus is not ready, can't come up */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		flags = dhd_os_spin_lock(&dhd->pub);
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		return -ENODEV;
+	}
+
+	dhd_os_sdunlock(dhdp);
+
+#ifdef EMBEDDED_PLATFORM
+	bcm_mkiovar("event_msgs", dhdp->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	bcopy(iovbuf, dhdp->eventmask, WL_EVENTING_MASK_LEN);
+
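+	/* Enable the default set of dongle events of interest to the host */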
+	setbit(dhdp->eventmask, WLC_E_SET_SSID);
+	setbit(dhdp->eventmask, WLC_E_PRUNE);
+	setbit(dhdp->eventmask, WLC_E_AUTH);
+	setbit(dhdp->eventmask, WLC_E_REASSOC);
+	setbit(dhdp->eventmask, WLC_E_REASSOC_IND);
+	setbit(dhdp->eventmask, WLC_E_DEAUTH_IND);
+	setbit(dhdp->eventmask, WLC_E_DISASSOC_IND);
+	setbit(dhdp->eventmask, WLC_E_DISASSOC);
+	setbit(dhdp->eventmask, WLC_E_JOIN);
+	setbit(dhdp->eventmask, WLC_E_ASSOC_IND);
+	setbit(dhdp->eventmask, WLC_E_PSK_SUP);
+	setbit(dhdp->eventmask, WLC_E_LINK);
+	setbit(dhdp->eventmask, WLC_E_NDIS_LINK);
+	setbit(dhdp->eventmask, WLC_E_MIC_ERROR);
+	setbit(dhdp->eventmask, WLC_E_PMKID_CACHE);
+	setbit(dhdp->eventmask, WLC_E_TXFAIL);
+	setbit(dhdp->eventmask, WLC_E_JOIN_START);
+	setbit(dhdp->eventmask, WLC_E_SCAN_COMPLETE);
+
+#ifdef PNO_SUPPORT
+	setbit(dhdp->eventmask, WLC_E_PFN_NET_FOUND);
+#endif /* PNO_SUPPORT */
+
+/* enable dongle roaming event */
+	setbit(dhdp->eventmask, WLC_E_ROAM);
+
+
+	dhdp->pktfilter_count = 4;
+	/* Setup filter to allow only unicast */
+	dhdp->pktfilter[0] = "100 0 0 0 0x01 0x00";
+	dhdp->pktfilter[1] = NULL;
+	dhdp->pktfilter[2] = NULL;
+	dhdp->pktfilter[3] = NULL;
+#endif /* EMBEDDED_PLATFORM */
+
+#ifdef READ_MACADDR
+	dhd_read_macaddr(dhd);
+#endif
+
+	/* Bus is ready, do any protocol initialization */
+	if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+		return ret;
+
+#ifdef WRITE_MACADDR
+	dhd_write_macaddr(dhd->pub.mac.octet);
+#endif
+
+#if defined(CONFIG_SYSCTL) && defined(WL_CFG80211)
+	wl_cfg80211_sysctl_export_devaddr(&dhd->pub);
+#endif
+
+	return 0;
+}
+
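+/*
+ * Build an iovar buffer from "name" and cmd_buf with bcm_mkiovar and issue it
+ * as a WLC_SET_VAR or WLC_GET_VAR ioctl; for a get, the response is copied
+ * back into cmd_buf.
+ */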
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+	char buf[strlen(name) + 1 + cmd_len];
+	int len = sizeof(buf);
+	wl_ioctl_t ioc;
+	int ret;
+
+	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (!set && ret >= 0)
+		memcpy(cmd_buf, buf, cmd_len);
+
+	return ret;
+}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+	.ndo_open = dhd_open,
+	.ndo_stop = dhd_stop,
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+};
+
+static struct net_device_ops dhd_ops_virt = {
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+	struct dhd_info *dhd = dhdp->info;
+	struct net_device *dev = NULL;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	ASSERT(dev);
+
+	if (netif_running(dev)) {
+		DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
+		return BCME_NOTDOWN;
+	}
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+		return BCME_BADARG;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface)  */
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add)
+{
+	u32 ipv4_buf[8]; /* temp save for AOE host_ip table */
+	int i;
+
+	bzero(ipv4_buf, sizeof(ipv4_buf));
+
+	/* display what we've got */
+	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+
+	/* Now that the host_ip table is saved, clear it in the dongle AOE */
+	dhd_aoe_hostip_clr(dhd_pub);
+
+	for (i = 0; i < 8; i++) {
+
+		if (add && (ipv4_buf[i] == 0)) {
+
+				ipv4_buf[i]	= ipa;
+				add = FALSE; /* added ipa to local table  */
+				DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+				__FUNCTION__, i));
+
+		} else if (ipv4_buf[i] == ipa) {
+			ipv4_buf[i]	= 0;
+			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+				__FUNCTION__, ipa, i));
+		}
+
+		if (ipv4_buf[i] != 0) {
+			/* add back host_ip entries from our local cache */
+			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i]);
+			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+				__FUNCTION__, ipv4_buf[i], i));
+		}
+	}
+#ifdef AOE_DBG
+	/* see the resulting hostip table */
+	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+static int dhd_device_event(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+
+	if (!ifa)
+		return NOTIFY_DONE;
+
+	dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+	dhd_pub = &dhd->pub;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) {
+#else
+	if (ifa->ifa_dev->dev) {
+#endif
+		switch (event) {
+		case NETDEV_UP:
+			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+			if (ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a) {
+				DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+					__FUNCTION__));
+				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE);
+			}
+#endif
+			break;
+
+		case NETDEV_DOWN:
+			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+			if (ifa->ifa_label[strlen(ifa->ifa_label)-2] != 0x3a) {
+				DHD_ARPOE(("%s: primary interface is down, AOE clr all\n",
+					__FUNCTION__));
+				dhd_aoe_hostip_clr(&dhd->pub);
+				dhd_aoe_arp_clr(&dhd->pub);
+			} else
+				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE);
+#else
+			dhd_aoe_hostip_clr(&dhd->pub);
+			dhd_aoe_arp_clr(&dhd->pub);
+#endif
+			break;
+
+		default:
+			DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
+				__func__, ifa->ifa_label, event));
+			break;
+		}
+	}
+	return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+int
+dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct net_device *net = NULL;
+	int err = 0;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+
+	net = dhd->iflist[ifidx]->net;
+	ASSERT(net);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->get_stats = dhd_get_stats;
+	net->do_ioctl = dhd_ioctl_entry;
+	net->hard_start_xmit = dhd_start_xmit;
+	net->set_mac_address = dhd_set_mac_address;
+	net->set_multicast_list = dhd_set_multicast_list;
+	net->open = net->stop = NULL;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &dhd_ops_virt;
+#endif
+
+	/* Ok, link into the network layer... */
+	if (ifidx == 0) {
+		/*
+		 * device functions for the primary interface only
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		net->open = dhd_open;
+		net->stop = dhd_stop;
+#else
+		net->netdev_ops = &dhd_ops_pri;
+#endif
+	} else {
+	/*
+	 * We have to use the primary MAC for virtual interfaces
+	 */
+		memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
+		if (ifidx == 1) {
+			DHD_TRACE(("%s ACCESS POINT MAC: \n", __FUNCTION__));
+			/*  ACCESSPOINT INTERFACE CASE */
+			temp_addr[0] |= 0x02;  /* set the Locally Administered address bit */
+		}
+	}
+	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+	net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	if ((err = register_netdev(net)) != 0) {
+		DHD_ERROR(("couldn't register the net device, err %d\n", err));
+		goto fail;
+	}
+
+	printf("%s: Driver up: Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+		net->name,
+	       dhd->pub.mac.octet[0], dhd->pub.mac.octet[1], dhd->pub.mac.octet[2],
+	       dhd->pub.mac.octet[3], dhd->pub.mac.octet[4], dhd->pub.mac.octet[5]);
+
+#if defined(SOFTAP) && defined(CONFIG_WIRELESS_EXT) && !defined(WL_CFG80211)
+		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (ifidx == 0) {
+		up(&dhd_registration_sem);
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+	return 0;
+
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+	return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+
+			/* With the Android cfg80211 driver the bus is already down in
+			 * dhd_stop(); calling stop again would cause SD read/write errors.
+			 */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				/* Stop the protocol module */
+				dhd_prot_stop(&dhd->pub);
+
+				/* Stop the bus module */
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+			}
+
+#if defined(OOB_INTR_ONLY)
+			bcmsdh_unregister_oob_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+		}
+	}
+}
+
+
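+/* Tear down the state built up by dhd_attach(), keyed off the dhd_state attach-progress bits. */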
+void dhd_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	unsigned long flags;
+
+	if (!dhdp)
+		return;
+
+	dhd = (dhd_info_t *)dhdp->info;
+	if (!dhd)
+		return;
+
+	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+		/* Give sufficient time for threads to start running in case
+		 * dhd_attach() has failed
+		 */
+		osl_delay(1000*100);
+	}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	unregister_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+		if (dhd->early_suspend.suspend)
+			unregister_early_suspend(&dhd->early_suspend);
+	}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detach and unlink the wireless extensions interface */
+		wl_iw_detach();
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	if (dhd->thr_sysioc_ctl.thr_pid >= 0) {
+		PROC_STOP(&dhd->thr_sysioc_ctl);
+	}
+
+	/* delete all interfaces, start with virtual  */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+		int i = 1;
+		dhd_if_t *ifp;
+
+		for (i = 1; i < DHD_MAX_IFS; i++)
+			if (dhd->iflist[i]) {
+				dhd->iflist[i]->state = WLC_E_IF_DEL;
+				dhd->iflist[i]->idx = i;
+				dhd_op_if(dhd->iflist[i]);
+			}
+
+		/*  delete primary interface 0 */
+		ifp = dhd->iflist[0];
+		ASSERT(ifp);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		if (ifp->net->open)
+#else
+		if (ifp->net->netdev_ops == &dhd_ops_pri)
+#endif
+		{
+			unregister_netdev(ifp->net);
+			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+
+		}
+	}
+
+	/* Clear the watchdog timer */
+	flags = dhd_os_spin_lock(&dhd->pub);
+	dhd->wd_timer_valid = FALSE;
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	del_timer_sync(&dhd->timer);
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHDTHREAD
+		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_wdt_ctl);
+		}
+
+		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_dpc_ctl);
+		}
+		else
+#endif /* DHDTHREAD */
+		tasklet_kill(&dhd->tasklet);
+	}
+	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+		dhd_bus_detach(dhdp);
+
+		if (dhdp->prot)
+			dhd_prot_detach(dhdp);
+	}
+
+#ifdef WL_CFG80211
+	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
+		wl_cfg80211_detach();
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+	unregister_pm_notifier(&dhd_sleep_pm_notifier);
+#endif
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock_destroy(&dhd->wl_wifi);
+		wake_lock_destroy(&dhd->wl_rxwake);
+#endif
+	}
+}
+
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd)
+			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+	}
+}
+
+static void __exit
+dhd_module_cleanup(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_bus_unregister();
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+	wl_android_wifictrl_func_del();
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+	wl_android_exit();
+
+	/* Call customer gpio to turn off power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+}
+
+
+static int __init
+dhd_module_init(void)
+{
+	int error = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	wl_android_init();
+
+#ifdef DHDTHREAD
+	/* Sanity check on the module parameters */
+	do {
+		/* Both watchdog and DPC as tasklets are ok */
+		if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
+			break;
+
+		/* If both watchdog and DPC are threads, TX must be deferred */
+		if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
+			break;
+
+		DHD_ERROR(("Invalid module parameters.\n"));
+		return -EINVAL;
+	} while (0);
+#endif /* DHDTHREAD */
+
+	/* Call customer gpio to turn on power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+	if (wl_android_wifictrl_func_add() < 0)
+		goto fail_1;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	sema_init(&dhd_registration_sem, 0);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+	error = dhd_bus_register();
+
+
+	if (!error)
+		printf("\n%s\n", dhd_version);
+	else {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail_1;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/*
+	 * Wait until the MMC sdio_register_driver() probe callback has run and
+	 * attached the driver.  This synchronizes the return from insmod with
+	 * the kernel's MMC/SDIO device callback registration.
+	 */
+	if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
+		error = -EINVAL;
+		DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
+		goto fail_2;
+	}
+#endif
+
+	return error;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && 1
+fail_2:
+	dhd_bus_unregister();
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+fail_1:
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+	wl_android_wifictrl_func_del();
+#endif 
+
+	/* Call customer gpio to turn off power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+
+	return error;
+}
+
+late_initcall(dhd_module_init);
+module_exit(dhd_module_cleanup);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		down(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		up(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+	return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+	dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
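+/*
+ * Sleep on the ioctl response waitqueue until *condition becomes true, a
+ * signal is pending, or the ioctl timeout expires; returns the remaining
+ * timeout in jiffies (0 on timeout).
+ */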
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	DECLARE_WAITQUEUE(wait, current);
+	int timeout = dhd_ioctl_timeout_msec;
+
+	/* Convert the timeout from milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(timeout);
+#else
+	timeout = timeout * HZ / 1000;
+#endif
+
+	/* Wait until control frame is available */
+	add_wait_queue(&dhd->ioctl_resp_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	/* Memory barrier for SMP: the variable "condition" points to dhd->rxlen
+	 * (see dhd_bus_rxctl() in dhd_sdio.c), which can be changed by another
+	 * processor.
+	 */
+	smp_mb();
+	while (!(*condition) && (!signal_pending(current) && timeout)) {
+		timeout = schedule_timeout(timeout);
+		smp_mb();
+	}
+
+	if (signal_pending(current))
+		*pending = TRUE;
+
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
+
+	return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (waitqueue_active(&dhd->ioctl_resp_wait)) {
+		wake_up_interruptible(&dhd->ioctl_resp_wait);
+	}
+
+	return 0;
+}
+
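+/*
+ * (Re)arm or stop the shared watchdog timer: a zero wdtick stops the timer,
+ * a non-zero value becomes the new watchdog period in milliseconds.
+ */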
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	flags = dhd_os_spin_lock(pub);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate == DHD_BUS_DOWN) {
+		dhd_os_spin_unlock(pub, flags);
+		return;
+	}
+
+	/* Totally stop the timer */
+	if (!wdtick && dhd->wd_timer_valid == TRUE) {
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(pub, flags);
+#ifdef DHDTHREAD
+		del_timer_sync(&dhd->timer);
+#else
+		del_timer(&dhd->timer);
+#endif /* DHDTHREAD */
+		return;
+	}
+
+	if (wdtick) {
+		dhd_watchdog_ms = (uint)wdtick;
+		/* Re-arm the timer with the latest watchdog period */
+		mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+		dhd->wd_timer_valid = TRUE;
+	}
+	dhd_os_spin_unlock(pub, flags);
+}
+
+void *
+dhd_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	/* wl_cfg80211_request_fw(filename); */
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	/* wl_cfg80211_read_fw(buf, len); */
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+	/* wl_cfg80211_release_fw(); */
+
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+	if (dhd->threads_only)
+		down(&dhd->sdsem);
+	else
+#endif /* DHDTHREAD */
+	spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+	if (dhd->threads_only)
+		up(&dhd->sdsem);
+	else
+#endif /* DHDTHREAD */
+	spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->txqlock);
+}
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdtxlock(dhd_pub_t *pub)
+{
+	dhd_os_sdlock(pub);
+}
+
+void
+dhd_os_sdtxunlock(dhd_pub_t *pub)
+{
+	dhd_os_sdunlock(pub);
+}
+
+#if defined(DHD_USE_STATIC_BUF)
+uint8* dhd_os_prealloc(void *osh, int section, uint size)
+{
+	return (uint8*)wl_android_prealloc(section, size);
+}
+
+void dhd_os_prefree(void *osh, void *addr, uint size)
+{
+}
+#endif /* defined(DHD_USE_STATIC_BUF) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+	int res = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (!dhd->pub.up) {
+		return NULL;
+	}
+
+	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+	if (res == 0)
+		return &dhd->iw.wstats;
+	else
+		return NULL;
+}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+	wl_event_msg_t *event, void **data)
+{
+	int bcmerror = 0;
+	ASSERT(dhd != NULL);
+
+	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
+	if (bcmerror != BCME_OK)
+		return (bcmerror);
+
+#if defined(CONFIG_WIRELESS_EXT)
+	if (event->bsscfgidx == 0) {
+		/*
+		 * Wireless ext is on primary interface only
+		 */
+
+		ASSERT(dhd->iflist[*ifidx] != NULL);
+		ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+		if (dhd->iflist[*ifidx]->net) {
+			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+		}
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT)  */
+
+#ifdef WL_CFG80211
+
+	if ((wl_cfg80211_is_progress_ifchange() ||
+		wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) {
+		/*
+		 * If IF_ADD/CHANGE operation is going on,
+		 *  discard any event received on the virtual I/F
+		 */
+		return (BCME_OK);
+	}
+
+	ASSERT(dhd->iflist[*ifidx] != NULL);
+	ASSERT(dhd->iflist[*ifidx]->net != NULL);
+	if (dhd->iflist[*ifidx]->net) {
+		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
+	}
+#endif /* defined(WL_CFG80211) */
+
+	return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+	/* Send up locally generated AMP HCI Events */
+	case WLC_E_BTA_HCI_EVENT: {
+		struct sk_buff *p, *skb;
+		bcm_event_t *msg;
+		wl_event_msg_t *p_bcm_event;
+		char *ptr;
+		uint32 len;
+		uint32 pktlen;
+		dhd_if_t *ifp;
+		dhd_info_t *dhd;
+		uchar *eth;
+		int ifidx;
+
+		len = ntoh32(event->datalen);
+		pktlen = sizeof(bcm_event_t) + len + 2;
+		dhd = dhdp->info;
+		ifidx = dhd_ifname2idx(dhd, event->ifname);
+
+		if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+			ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+			msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
+
+			bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
+			bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
+			ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
+
+			msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+			/* BCM Vendor specific header... */
+			msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
+			msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
+			bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
+
+			/* vendor spec header length + pvt data length (private indication
+			 *  hdr + actual message itself)
+			 */
+			msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
+				BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
+			msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
+
+			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+			/* copy  wl_event_msg_t into sk_buf */
+
+			/* pointer to wl_event_msg_t in sk_buf */
+			p_bcm_event = &msg->event;
+			bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
+
+			/* copy hci event into sk_buf */
+			bcopy(data, (p_bcm_event + 1), len);
+
+			msg->bcm_hdr.length  = hton16(sizeof(wl_event_msg_t) +
+				ntoh16(msg->bcm_hdr.length));
+			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+			ptr = (char *)(msg + 1);
+			/* The last 2 bytes of the message are 0x00 0x00 to signal that no
+			 * further ethertypes follow.
+			 */
+			ptr[len+0] = 0x00;
+			ptr[len+1] = 0x00;
+
+			skb = PKTTONATIVE(dhdp->osh, p);
+			eth = skb->data;
+			len = skb->len;
+
+			ifp = dhd->iflist[ifidx];
+			if (ifp == NULL)
+			     ifp = dhd->iflist[0];
+
+			ASSERT(ifp);
+			skb->dev = ifp->net;
+			skb->protocol = eth_type_trans(skb, skb->dev);
+
+			skb->data = eth;
+			skb->len = len;
+
+			/* Strip header, count, deliver upward */
+			skb_pull(skb, ETH_HLEN);
+
+			/* Send the packet */
+			if (in_interrupt()) {
+				netif_rx(skb);
+			} else {
+				netif_rx_ni(skb);
+			}
+		}
+		else {
+			/* Could not allocate a sk_buf */
+			DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+		}
+		break;
+	} /* case WLC_E_BTA_HCI_EVENT */
+
+	default:
+		break;
+	}
+}
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	dhd_os_sdunlock(dhd);
+	wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2);
+	dhd_os_sdlock(dhd);
+#endif
+	return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	if (waitqueue_active(&dhdinfo->ctrl_wait))
+		wake_up_interruptible(&dhdinfo->ctrl_wait);
+#endif
+	return;
+}
+
+int
+dhd_dev_reset(struct net_device *dev, uint8 flag)
+{
+	int ret;
+
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	ret = dhd_bus_devreset(&dhd->pub, flag);
+	if (ret) {
+		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+
+	return ret;
+}
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd) {
+		ret = dhd->pub.suspend_disable_flag;
+		dhd->pub.suspend_disable_flag = val;
+	}
+	return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val)
+{
+	int ret = 0;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd) {
+		ret = dhd_set_suspend(val, &dhd->pub);
+	}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+	return ret;
+}
+
+int net_os_set_dtim_skip(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd)
+		dhd->pub.dtim_skip = val;
+
+	return 0;
+}
+
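+/* Install or clear one of the predefined broadcast/multicast packet filter strings in the requested filter slot. */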
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	char *filterp = NULL;
+	int ret = 0;
+
+	if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
+		return ret;
+	if (num >= dhd->pub.pktfilter_count)
+		return -EINVAL;
+	if (add_remove) {
+		switch (num) {
+		case DHD_BROADCAST_FILTER_NUM:
+			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+			break;
+		case DHD_MULTICAST4_FILTER_NUM:
+			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+			break;
+		case DHD_MULTICAST6_FILTER_NUM:
+			filterp = "103 0 0 0 0xFFFF 0x3333";
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	dhd->pub.pktfilter[num] = filterp;
+	return ret;
+}
+
+int net_os_set_packet_filter(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	/* Packet filtering is changed only while we are still in early-suspend.
+	 * It can always be turned OFF in that state, but it is turned back ON
+	 * only if suspend_disable_flag is not set.
+	 */
+	if (dhd && dhd->pub.up) {
+		if (dhd->pub.in_suspend) {
+			if (!val || (val && !dhd->pub.suspend_disable_flag))
+				dhd_set_packet_filter(val, &dhd->pub);
+		}
+	}
+	return ret;
+}
+
+
+void
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	dhd_preinit_ioctls(&dhd->pub);
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_clean */
+int
+dhd_dev_pno_reset(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_clean(&dhd->pub));
+}
+
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev,  int pfn_enabled)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_enable(&dhd->pub, pfn_enabled));
+}
+
+
+/* Linux wrapper to call common dhd_pno_set */
+int
+dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+	ushort  scan_fr, int pno_repeat, int pno_freq_expo_max)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max));
+}
+
+/* Linux wrapper to get  pno status */
+int
+dhd_dev_get_pno_status(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_get_status(&dhd->pub));
+}
+
+#endif /* PNO_SUPPORT */
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd) {
+		if (!dhd->hang_was_sent) {
+			dhd->hang_was_sent = 1;
+#if defined(CONFIG_WIRELESS_EXT)
+			ret = wl_iw_send_priv_event(dev, "HANG");
+#endif
+		}
+	}
+	return ret;
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd && dhd->pub.up)
+			memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+}
+
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags = 0;
+
+	if (dhd)
+		spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+	return flags;
+}
+
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+	return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX	10
+
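+/*
+ * Wait for all pending 802.1X (EAPOL) frames to be transmitted, giving up
+ * after MAX_WAIT_FOR_8021X_TX polling attempts; returns the remaining count.
+ */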
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int timeout = 10 * HZ / 1000;
+	int ntimes = MAX_WAIT_FOR_8021X_TX;
+	int pend = dhd_get_pend_8021x_cnt(dhd);
+
+	while (ntimes && pend) {
+		if (pend) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(timeout);
+			set_current_state(TASK_RUNNING);
+			ntimes--;
+		}
+		pend = dhd_get_pend_8021x_cnt(dhd);
+	}
+	return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+	int ret = 0;
+	struct file *fp;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* open file to write */
+	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+	if (IS_ERR(fp)) {
+		printf("%s: open file error\n", __FUNCTION__);
+		ret = -1;
+		fp = NULL;
+		goto exit;
+	}
+
+	/* Write buf to file */
+	fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+	/* free buf before return */
+	MFREE(dhd->osh, buf, size);
+	/* close file before return */
+	if (fp)
+		filp_close(fp, current->files);
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		ret = dhd->wakelock_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (dhd->wakelock_timeout_enable)
+			wake_lock_timeout(&dhd->wl_rxwake, HZ);
+#endif
+		dhd->wakelock_timeout_enable = 0;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		dhd->wakelock_timeout_enable = 1;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int net_os_wake_lock_timeout_enable(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout_enable(&dhd->pub);
+	return ret;
+}
+
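+/*
+ * dhd_os_wake_lock()/dhd_os_wake_unlock() maintain a reference count under
+ * wakelock_spinlock: when CONFIG_HAS_WAKELOCK is set, the wl_wifi wake lock is
+ * acquired as the counter goes from 0 to 1 and released when it drops back to
+ * 0, so nested callers simply pair each lock with an unlock.  Both routines
+ * return the resulting counter value.
+ */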
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+		if (!dhd->wakelock_counter)
+			wake_lock(&dhd->wl_wifi);
+#endif
+		dhd->wakelock_counter++;
+		ret = dhd->wakelock_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	dhd_os_wake_lock_timeout(pub);
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter) {
+			dhd->wakelock_counter--;
+#ifdef CONFIG_HAS_WAKELOCK
+			if (!dhd->wakelock_counter)
+				wake_unlock(&dhd->wl_wifi);
+#endif
+			ret = dhd->wakelock_counter;
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_unlock(&dhd->pub);
+	return ret;
+}
+
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_interface_entry_update(void* state,	ewlfc_mac_entry_action_t action, uint8 ifid,
+	uint8 iftype, uint8* ea);
+extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits);
+
+int dhd_wlfc_interface_event(struct dhd_info *dhd, uint8 action, uint8 ifid, uint8 iftype,
+	uint8* ea)
+{
+	if (dhd->pub.wlfc_state == NULL)
+		return BCME_OK;
+
+	return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea);
+}
+
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data)
+{
+	if (dhd->pub.wlfc_state == NULL)
+		return BCME_OK;
+
+	return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data);
+}
+
+int dhd_wlfc_event(struct dhd_info *dhd)
+{
+	return dhd_wlfc_enable(&dhd->pub);
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+
+#include <linux/debugfs.h>
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+typedef struct dhd_dbgfs {
+	struct dentry	*debugfs_dir;
+	struct dentry	*debugfs_mem;
+	dhd_pub_t 	*dhdp;
+	uint32 		size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+                       size_t count, loff_t *ppos)
+{
+	ssize_t rval;
+	uint32 tmp;
+	loff_t pos = *ppos;
+	size_t ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+	ret = copy_to_user(ubuf, &tmp, 4);
+	if (ret == count)
+		return -EFAULT;
+
+	count -= ret;
+	*ppos = pos + count;
+	rval = count;
+
+	return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	size_t ret;
+	uint32 buf;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+	if (ret == count)
+		return -EFAULT;
+
+	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+	return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	loff_t pos = -1;
+
+	switch (whence) {
+		case 0:
+			pos = off;
+			break;
+		case 1:
+			pos = file->f_pos + off;
+			break;
+		case 2:
+			pos = g_dbgfs.size - off;
+	}
+	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+	.read   = dhd_dbg_state_read,
+	.write	= dhd_debugfs_write,
+	.open   = dhd_dbg_state_open,
+	.llseek	= dhd_debugfs_lseek
+};
+
+static void dhd_dbg_create(void)
+{
+	if (g_dbgfs.debugfs_dir) {
+		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+			NULL, &dhd_dbg_state_ops);
+	}
+}
+
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+	int err;
+
+	g_dbgfs.dhdp = dhdp;
+	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+	if (IS_ERR(g_dbgfs.debugfs_dir)) {
+		err = PTR_ERR(g_dbgfs.debugfs_dir);
+		g_dbgfs.debugfs_dir = NULL;
+		return;
+	}
+
+	dhd_dbg_create();
+
+	return;
+}
+
+void dhd_dbg_remove(void)
+{
+	debugfs_remove(g_dbgfs.debugfs_mem);
+	debugfs_remove(g_dbgfs.debugfs_dir);
+
+	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+
+}
+#endif /* ifdef BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct sk_buff *skb;
+	uint32 htsf = 0;
+	uint16 dport = 0, oldmagic = 0xACAC;
+	char *p1;
+	htsfts_t ts;
+
+	/*  timestamp packet  */
+
+	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/*		memcpy(&proto, p1+26, 4);  	*/
+		memcpy(&dport, p1+40, 2);
+/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
+		dport = ntoh16(dport);
+	}
+
+	/* timestamp only if icmp or udp iperf with port 5555 */
+/*	if (proto == 17 && dport == tsport) { */
+	if (dport >= tsport && dport <= tsport + 20) {
+
+		skb = (struct sk_buff *) pktbuf;
+
+		htsf = dhd_get_htsf(dhd, 0);
+		memset(skb->data + 44, 0, 2); /* clear checksum */
+		memcpy(skb->data+82, &oldmagic, 2);
+		memcpy(skb->data+84, &htsf, 4);
+
+		memset(&ts, 0, sizeof(htsfts_t));
+		ts.magic  = HTSFMAGIC;
+		ts.prio   = PKTPRIO(pktbuf);
+		ts.seqnum = htsf_seqnum++;
+		ts.c10    = get_cycles();
+		ts.t10    = htsf;
+		ts.endmagic = HTSFENDMAGIC;
+
+		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+	}
+}
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+	int pktcnt = 0, curval = 0, i;
+	for (i = 0; i < (NUMBIN-2); i++) {
+		curval += 500;
+		printf("%d ",  his->bin[i]);
+		pktcnt += his->bin[i];
+	}
+	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+		his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+	int i, binval = 0;
+
+	if (value < 0) {
+		histo->bin[NUMBIN-1]++;
+		return;
+	}
+	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
+		histo->bin[NUMBIN-2] = value;
+
+	for (i = 0; i < (NUMBIN-2); i++) {
+		binval += 500; /* 500 ms bins */
+		if (value <= binval) {
+			histo->bin[i]++;
+			return;
+		}
+	}
+	histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	char *p1;
+	uint16 old_magic;
+	int d1, d2, d3, end2end;
+	htsfts_t *htsf_ts;
+	uint32 htsf;
+
+	skb = PKTTONATIVE(dhdp->osh, pktbuf);
+	p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
+		memcpy(&old_magic, p1+78, 2);
+		htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+	}
+	else
+		return;
+
+	if (htsf_ts->magic == HTSFMAGIC) {
+		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+		htsf_ts->cE0 = get_cycles();
+	}
+
+	if (old_magic == 0xACAC) {
+
+		tspktcnt++;
+		htsf = dhd_get_htsf(dhd, 0);
+		memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+		memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+		d1 = ts[tsidx].t2 - ts[tsidx].t1;
+		d2 = ts[tsidx].t3 - ts[tsidx].t2;
+		d3 = ts[tsidx].t4 - ts[tsidx].t3;
+		end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+		sorttobin(d1, &vi_d1);
+		sorttobin(d2, &vi_d2);
+		sorttobin(d3, &vi_d3);
+		sorttobin(end2end, &vi_d4);
+
+		if (end2end > 0 && end2end >  maxdelay) {
+			maxdelay = end2end;
+			maxdelaypktno = tspktcnt;
+			memcpy(&maxdelayts, &ts[tsidx], 16);
+		}
+		if (++tsidx >= TSMAX)
+			tsidx = 0;
+	}
+}
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+	uint32 htsf = 0, cur_cycle, delta, delta_us;
+	uint32    factor, baseval, baseval2;
+	cycles_t t;
+
+	t = get_cycles();
+	cur_cycle = t;
+
+	if (cur_cycle >  dhd->htsf.last_cycle)
+		delta = cur_cycle -  dhd->htsf.last_cycle;
+	else {
+		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
+	}
+
+	delta = delta >> 4;
+
+	if (dhd->htsf.coef) {
+		/* times ten to get the first digit */
+		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+		baseval  = (delta*10)/factor;
+		baseval2 = (delta*10)/(factor+1);
+		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+	}
+	else {
+		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+	}
+
+	return htsf;
+}
+
+static void dhd_dump_latency(void)
+{
+	int i, max = 0;
+	int d1, d2, d3, d4, d5;
+
+	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
+	for (i = 0; i < TSMAX; i++) {
+		d1 = ts[i].t2 - ts[i].t1;
+		d2 = ts[i].t3 - ts[i].t2;
+		d3 = ts[i].t4 - ts[i].t3;
+		d4 = ts[i].t4 - ts[i].t1;
+		d5 = ts[max].t4-ts[max].t1;
+		if (d4 > d5 && d4 > 0)  {
+			max = i;
+		}
+		printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
+			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+			d1, d2, d3, d4, i);
+	}
+
+	printf("current idx = %d \n", tsidx);
+
+	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+	printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
+	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+	maxdelayts.t2 - maxdelayts.t1,
+	maxdelayts.t3 - maxdelayts.t2,
+	maxdelayts.t4 - maxdelayts.t3,
+	maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+	uint32 s1, s2;
+
+	struct tsf {
+		uint32 low;
+		uint32 high;
+	} tsf_buf;
+
+	memset(&ioc, 0, sizeof(ioc));
+	memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strcpy(buf, "tsf");
+	s1 = dhd_get_htsf(dhd, 0);
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: tsf is not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+		return ret;
+	}
+	s2 = dhd_get_htsf(dhd, 0);
+
+	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+		dhd->htsf.coefdec2, s2-tsf_buf.low);
+	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+	return 0;
+}
+
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+	static ulong  cur_cycle = 0, prev_cycle = 0;
+	uint32 htsf, tsf_delta = 0;
+	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+	ulong b, a;
+	cycles_t t;
+
+	/* cycles_t in include/mips/timex.h */
+
+	t = get_cycles();
+
+	prev_cycle = cur_cycle;
+	cur_cycle = t;
+
+	if (cur_cycle > prev_cycle)
+		cyc_delta = cur_cycle - prev_cycle;
+	else {
+		b = cur_cycle;
+		a = prev_cycle;
+		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+	}
+
+	if (data == NULL)
+		printf(" tsf update ata point er is null \n");
+
+	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+	memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+	if (cur_tsf.low == 0) {
+		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+		return;
+	}
+
+	if (cur_tsf.low > prev_tsf.low)
+		tsf_delta = (cur_tsf.low - prev_tsf.low);
+	else {
+		DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+		 cur_tsf.low, prev_tsf.low));
+		if (cur_tsf.high > prev_tsf.high) {
+			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+			DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
+		}
+		else
+			return; /* do not update */
+	}
+
+	if (tsf_delta)  {
+		hfactor = cyc_delta / tsf_delta;
+		tmp  = 	(cyc_delta - (hfactor * tsf_delta))*10;
+		dec1 =  tmp/tsf_delta;
+		dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+		tmp  = 	(tmp   - (dec1*tsf_delta))*10;
+		dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+		if (dec3 > 4) {
+			if (dec2 == 9) {
+				dec2 = 0;
+				if (dec1 == 9) {
+					dec1 = 0;
+					hfactor++;
+				}
+				else {
+					dec1++;
+				}
+			}
+			else
+				dec2++;
+		}
+	}
+
+	if (hfactor) {
+		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
+		dhd->htsf.coef = hfactor;
+		dhd->htsf.last_cycle = cur_cycle;
+		dhd->htsf.last_tsf = cur_tsf.low;
+		dhd->htsf.coefdec1 = dec1;
+		dhd->htsf.coefdec2 = dec2;
+	}
+	else {
+		htsf = prev_tsf.low;
+	}
+}
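+
+/*
+ * Worked example of the fixed-point coefficient above (illustrative numbers
+ * only): with cyc_delta = 123456 cycles and tsf_delta = 10000 TSF ticks,
+ * hfactor = 12, tmp = (123456 - 120000) * 10 = 34560, so dec1 = 3 and
+ * dec2 = 4, i.e. the stored ratio is roughly 12.34 cycles per TSF tick,
+ * which dhd_get_htsf() uses to convert a cycle delta back into TSF time.
+ */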
+
+#endif /* WLMEDIA_HTSF */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
new file mode 100644
index 0000000..72290b5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -0,0 +1,38 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c,v 1.3 2009-04-10 04:14:49 Exp $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+	int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+	return rc;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h
new file mode 100644
index 0000000..e0a54ad
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -0,0 +1,105 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h,v 1.8.10.6 2010-12-22 23:47:24 Exp $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT  20000 /* In milliseconds */
+#endif
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_prot_init(dhd_pub_t *dhdp);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                             void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_enque_sendq(void* state, int prec, void* p);
+extern int dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx);
+extern void dhd_wlfc_cleanup(dhd_pub_t *dhd);
+#endif /* PROP_TXSTATUS */
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#elif defined(RNDIS)
+#define DHD_PROTOCOL "rndis"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c
new file mode 100644
index 0000000..ce9d26b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -0,0 +1,6212 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c,v 1.274.2.40 2011-02-09 22:42:44 Exp $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#if defined(DHD_DEBUG)
+#include <hndrte_armtrap.h>
+#include <hndrte_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME         "mem_dump"
+#endif
+
+#define QLEN		256	/* bulk rx and tx queue lengths */
+#define FCHI		(QLEN - 10)
+#define FCLOW		(FCHI / 2)
+#define PRIOMASK	7
+
+#define TXRETRIES	2	/* # of retries for tx frames */
+
+#if defined(CONFIG_MACH_SANDGATE2G)
+#define DHD_RXBOUND	250	/* Default for max rx frames in one scheduling */
+#else
+#define DHD_RXBOUND	50	/* Default for max rx frames in one scheduling */
+#endif /* defined(CONFIG_MACH_SANDGATE2G) */
+
+#define DHD_TXBOUND	20	/* Default for max tx frames in one scheduling */
+
+#define DHD_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE	4096	/* max nvram buf size */
+#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold biggest possible glom */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD   32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN	(SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#ifdef SDTEST
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ	32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ	2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY	3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free applies unconditionally for sdio and sdspi; for the gspi bus it
+ * is conditional on a bufpool being present.
+ */
+#define PKTFREE2()		if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+#if defined(OOB_INTR_ONLY)
+extern void bcmsdh_set_irq(int flag);
+#endif /* defined(OOB_INTR_ONLY) */
+#ifdef PROP_TXSTATUS
+extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+#endif
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+typedef struct dhd_console {
+	uint		count;			/* Poll interval msec counter */
+	uint		log_addr;		/* Log struct address (fixed) */
+	hndrte_log_t	log;			/* Log struct (host copy) */
+	uint		bufsize;		/* Size of log buffer */
+	uint8		*buf;			/* Log buffer (host copy) */
+	uint		last;			/* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+
+	bcmsdh_info_t	*sdh;			/* Handle for BCMSDH calls */
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+
+	sdpcmd_regs_t	*regs;			/* Registers for SDIO core */
+	uint		sdpcmrev;		/* SDIO core revision */
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		hostintmask;		/* Copy of Host Interrupt Mask */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path; /* module_param: path to firmware image */
+	char		*nv_path; /* module_param: path to nvram vars file */
+	const char      *nvram_params;		/* user specified nvram params. */
+
+	uint		blocksize;		/* Block size of SDIO transfers */
+	uint		roundup;		/* Max roundup limit */
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+	uint8		flowcontrol;		/* per prio flow control bitmask */
+	uint8		tx_seq;			/* Transmit sequence number (next) */
+	uint8		tx_max;			/* Maximum transmit sequence allowed */
+
+	uint8		hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+	uint8		*rxhdr;			/* Header of current rx frame (in hdrbuf) */
+	uint16		nextlen;		/* Next Read Len from last header */
+	uint8		rx_seq;			/* Receive sequence number (expected) */
+	bool		rxskip;			/* Skip receive (awaiting NAK ACK) */
+
+	void		*glomd;			/* Packet containing glomming descriptor */
+	void		*glom;			/* Packet chain for glommed superframe */
+	uint		glomerr;		/* Glom packet read errors */
+
+	uint8		*rxbuf;			/* Buffer for receiving control packets */
+	uint		rxblen;			/* Allocated length of rxbuf */
+	uint8		*rxctl;			/* Aligned pointer into rxbuf */
+	uint8		*databuf;		/* Buffer for receiving big glom packet */
+	uint8		*dataptr;		/* Aligned pointer into databuf */
+	uint		rxlen;			/* Length of valid data in buffer */
+
+	uint8		sdpcm_ver;		/* Bus protocol reported by dongle */
+
+	bool		intr;			/* Use interrupts */
+	bool		poll;			/* Use polling */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint 		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+	uint		spurious;		/* Count of spurious interrupts */
+	uint		pollrate;		/* Ticks between device polls */
+	uint		polltick;		/* Tick counter */
+	uint		pollcnt;		/* Count of active polls */
+
+#ifdef DHD_DEBUG
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+	uint		regfails;		/* Count of R_REG/W_REG failures */
+
+	uint		clkstate;		/* State of sd and backplane clock(s) */
+	bool		activity;		/* Activity flag for clock down */
+	int32		idletime;		/* Control for activity timeout */
+	int32		idlecount;		/* Activity timeout counter */
+	int32		idleclock;		/* How to set bus driver when idle */
+	int32		sd_divisor;		/* Speed control to bus driver */
+	int32		sd_mode;		/* Mode control to bus driver */
+	int32		sd_rxchain;		/* If bcmsdh api accepts PKT chains */
+	bool		use_rxchain;		/* If dhd should use PKT chains */
+	bool		sleeping;		/* Is SDIO bus sleeping? */
+	bool		rxflow_mode;	/* Rx flow control mode */
+	bool		rxflow;			/* Is rx flow control on */
+	uint		prev_rxlim_hit;		/* Is prev rx limit exceeded (per dpc schedule) */
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+	bool		usebufpool;
+
+#ifdef SDTEST
+	/* external loopback */
+	bool		ext_loop;
+	uint8		loopid;
+
+	/* pktgen configuration */
+	uint		pktgen_freq;		/* Ticks between bursts */
+	uint		pktgen_count;		/* Packets to send each burst */
+	uint		pktgen_print;		/* Bursts between count displays */
+	uint		pktgen_total;		/* Stop after this many */
+	uint		pktgen_minlen;		/* Minimum packet data len */
+	uint		pktgen_maxlen;		/* Maximum packet data len */
+	uint		pktgen_mode;		/* Configured mode: tx, rx, or echo */
+	uint		pktgen_stop;		/* Number of tx failures causing stop */
+
+	/* active pktgen fields */
+	uint		pktgen_tick;		/* Tick counter for bursts */
+	uint		pktgen_ptick;		/* Burst counter for printing */
+	uint		pktgen_sent;		/* Number of test packets generated */
+	uint		pktgen_rcvd;		/* Number of test packets received */
+	uint		pktgen_fail;		/* Number of failed send attempts */
+	uint16		pktgen_len;		/* Length of next packet to send */
+#define PKTGEN_RCV_IDLE     (0)
+#define PKTGEN_RCV_ONGOING  (1)
+	uint16		pktgen_rcv_state;		/* receive state */
+	uint		pktgen_rcvd_rcvsession;	/* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+	/* Some additional counters */
+	uint		tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint		fcqueued;		/* Tx packets that got queued */
+	uint		rxrtx;			/* Count of rtx requests (NAK to dongle) */
+	uint		rx_toolong;		/* Receive frames too long to receive */
+	uint		rxc_errors;		/* SDIO errors when reading control frames */
+	uint		rx_hdrfail;		/* SDIO errors on header reads */
+	uint		rx_badhdr;		/* Bad received headers (roosync?) */
+	uint		rx_badseq;		/* Mismatched rx sequence number */
+	uint		fc_rcvd;		/* Number of flow-control events received */
+	uint		fc_xoff;		/* Number which turned on flow-control */
+	uint		fc_xon;			/* Number which turned off flow-control */
+	uint		rxglomfail;		/* Failed deglom attempts */
+	uint		rxglomframes;		/* Number of glom frames (superframes) */
+	uint		rxglompkts;		/* Number of packets from glom frames */
+	uint		f2rxhdrs;		/* Number of header reads */
+	uint		f2rxdata;		/* Number of frame data reads */
+	uint		f2txdata;		/* Number of f2 frame writes */
+	uint		f1regdata;		/* Number of f1 register accesses */
+
+	uint8		*ctrl_frame_buf;
+	uint32		ctrl_frame_len;
+	bool		ctrl_frame_stat;
+	uint32		rxint_mode;	/* rx interrupt mode */
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE	0
+#define CLK_SDONLY	1
+#define CLK_PENDING	2	/* Not used yet */
+#define CLK_AVAIL	3
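+
+/*
+ * dhdsdio_clkctl() walks these states in order: CLK_NONE <-> CLK_SDONLY (SD
+ * clock only) <-> CLK_AVAIL (backplane HT clock up).  CLK_PENDING is set by
+ * dhdsdio_htclk() when an HT-avail request is left waiting on the
+ * clock-available interrupt.
+ */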
+
+#define DHD_NOPMU(dhd)	(FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_MEMSIZE (128 *1024)
+int dhd_dongle_memsize;
+
+static bool dhd_doflow;
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static const uint watermark = 8;
+static const uint firstread = DHD_FIRSTREAD;
+
+#define HDATLEN (firstread - (SDPCM_HDRLEN))
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+#define ALIGNMENT  4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align)					\
+	do {								\
+		uint datalign;						\
+		datalign = (uintptr)PKTDATA((osh), (p));		\
+		datalign = ROUNDUP(datalign, (align)) - datalign;	\
+		ASSERT(datalign < (align));				\
+		ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign));	\
+		if (datalign)						\
+			PKTPULL((osh), (p), datalign);			\
+		PKTSETLEN((osh), (p), (len));				\
+	} while (0)
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+
+/* To check if there's window offered */
+#define DATAOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
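+/*
+ * Example (illustrative values): with bus->tx_max = 0x05 and bus->tx_seq = 0xFE
+ * the 8-bit difference (uint8)(tx_max - tx_seq) is 0x07, which is non-zero and
+ * has bit 7 clear, so DATAOK() reports that seven more frames may be sent
+ * before the dongle's transmit window is exhausted.
+ */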
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		regvar = R_REG(bus->dhd->osh, regaddr); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+			regvar = 0; \
+		} \
+	} \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		W_REG(bus->dhd->osh, regaddr, regval); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) \
+			DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+	} \
+} while (0)
+
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in the dongle to transmit to the host.
+ *
+ * Mode 0:	Dongle writes the software host mailbox and the host is interrupted.
+ * Mode 1:	(sdiod core rev >= 4)
+ *		Device sets a new bit in the intstatus whenever there is a packet
+ *		available in the FIFO.  Host can't clear this specific status bit until
+ *		all the packets are read from the FIFO.  No need to ack dongle intstatus.
+ * Mode 2:	(sdiod core rev >= 4)
+ *		Device sets a bit in the intstatus, and the host acks it by writing one
+ *		to this bit.  Dongle won't generate any more packet interrupts until the
+ *		host has read all the packets from the dongle and reads back a zero,
+ *		indicating that no more packets remain.  No need to disable host ints,
+ *		but the intstatus must be acked.
+ */
+
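+/*
+ * In this driver, rxint_mode selects which intstatus bit FRAME_AVAIL_MASK()
+ * (defined below) watches: the default SDIO_DEVICE_HMB_RXINT mode uses
+ * I_HMB_FRAME_IND, while the two rev >= 4 data-interrupt modes use
+ * I_XMTDATA_AVAIL.
+ */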
+#define SDIO_DEVICE_HMB_RXINT		0	/* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0	1	/* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1	2	/* from sdiod rev 4 */
+
+
+#define FRAME_AVAIL_MASK(bus) 	\
+	((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
+
+#define DHD_BUS			SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus)	((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count);
+#endif
+
+#ifdef DHD_DEBUG
+static int dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size);
+static int dhdsdio_mem_dump(dhd_bus_t *bus);
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+                                 void * regsva, uint16  devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+	bool reset_flag);
+
+static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+
+static bool dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+static void
+dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_MEMSIZE;
+	/* Restrict the memsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_memsize, min_size));
+	if ((dhd_dongle_memsize > min_size) &&
+		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_memsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
+
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+	int err;
+	uint8 clkctl, clkreq, devctl;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(OOB_INTR_ONLY)
+	pendok = FALSE;
+#endif
+	clkctl = 0;
+	sdh = bus->sdh;
+
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+
+
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+		if (pendok &&
+		    ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) {
+			uint32 dummy, retries;
+			R_SDREG(dummy, &bus->regs->clockctlstatus, retries);
+		}
+
+		/* Check current status */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			DHD_INFO(("CLKCTL: set PENDING\n"));
+			bus->clkstate = CLK_PENDING;
+			return BCME_OK;
+		} else if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+			                                    SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+			          !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+		}
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+			           __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+			return BCME_ERROR;
+		}
+
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+		if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+			if (!SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+			}
+#endif /* !defined(BCMLXSDMMC) */
+		} else {
+			if (SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+			}
+		}
+#endif /* defined (DHD_DEBUG) */
+
+		bus->activity = TRUE;
+	} else {
+		clkreq = 0;
+
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		DHD_INFO(("CLKCTL: turned OFF\n"));
+		if (err) {
+			DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+			           __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+	int err;
+	int32 iovalue;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (on) {
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			/* Turn on clock and restore mode */
+			iovalue = 1;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			iovalue = bus->sd_mode;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_mode: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Restore clock speed */
+			iovalue = bus->sd_divisor;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_SDONLY;
+	} else {
+		/* Stop or slow the SD clock itself */
+		if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+			DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+			           __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+			return BCME_ERROR;
+		}
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			if (sd1idle) {
+				/* Change to SD1 mode and turn off clock */
+				iovalue = 1;
+				err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+				                      &iovalue, sizeof(iovalue), TRUE);
+				if (err) {
+					DHD_ERROR(("%s: error changing sd_clock: %d\n",
+					           __FUNCTION__, err));
+					return BCME_ERROR;
+				}
+			}
+
+			iovalue = 0;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Set divisor to idle value */
+			iovalue = bus->idleclock;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_NONE;
+	}
+
+	return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+	int ret = BCME_OK;
+#ifdef DHD_DEBUG
+	uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Early exit if we're already there */
+	if (bus->clkstate == target) {
+		if (target == CLK_AVAIL) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+		}
+		return ret;
+	}
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			dhdsdio_sdclk(bus, TRUE);
+		/* Now request HT Avail on the backplane */
+		ret = dhdsdio_htclk(bus, TRUE, pendok);
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+		}
+		break;
+
+	case CLK_SDONLY:
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			ret = dhdsdio_sdclk(bus, TRUE);
+		else if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		else
+			DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+			           bus->clkstate, target));
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		}
+		break;
+
+	case CLK_NONE:
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		/* Now remove the SD clock */
+		ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+		if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+		dhd_os_wd_timer(bus->dhd, 0);
+		break;
+	}
+#ifdef DHD_DEBUG
+	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+	return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+	          (sleep ? "SLEEP" : "WAKE"),
+	          (bus->sleeping ? "SLEEP" : "WAKE")));
+
+	/* Done if we're already in the requested state */
+	if (sleep == bus->sleeping)
+		return BCME_OK;
+
+	/* Going to sleep: set the alarm and turn off the lights... */
+	if (sleep) {
+		/* Don't sleep if something is pending */
+		if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+			return BCME_BUSY;
+
+
+		/* Disable SDIO interrupts (no longer interested) */
+		bcmsdh_intr_disable(bus->sdh);
+
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+		/* Turn off our contribution to the HT clock request */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+		/* Isolate the bus */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+		                 SBSDIO_DEVCTL_PADS_ISO, NULL);
+
+		/* Change state */
+		bus->sleeping = TRUE;
+
+	} else {
+		/* Waking up: bus power up is ok, set local state */
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 0, NULL);
+
+		/* Force pad isolation off if possible (in case power never toggled) */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+		/* Make sure we have SD bus access */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+		/* Change state */
+		bus->sleeping = FALSE;
+
+		/* Enable interrupts again */
+		if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+			bus->intdis = FALSE;
+			bcmsdh_intr_enable(bus->sdh);
+		}
+	}
+
+	return BCME_OK;
+}
+
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+	bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (enable == TRUE) {
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+	} else {
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+	}
+
+	/* Turn off our contribution to the HT clock request */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+#define BUS_WAKE(bus) \
+	do { \
+		if ((bus)->sleeping) \
+			dhdsdio_bussleep((bus), FALSE); \
+	} while (0);
+
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int
+dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+{
+	int ret;
+	osl_t *osh;
+	uint8 *frame;
+	uint16 len, pad1 = 0;
+	uint32 swheader;
+	uint retries = 0;
+	bcmsdh_info_t *sdh;
+	void *new;
+	int i;
+#ifdef WLMEDIA_HTSF
+	char *p;
+	htsfts_t *htsf_ts;
+#endif
+
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	sdh = bus->sdh;
+	osh = bus->dhd->osh;
+
+	if (bus->dhd->dongle_reset) {
+		ret = BCME_NOTREADY;
+		goto done;
+	}
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+	if (PKTLEN(osh, pkt) >= 100) {
+		p = PKTDATA(osh, pkt);
+		htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12);
+		if (htsf_ts->magic == HTSFMAGIC) {
+			htsf_ts->c20 = get_cycles();
+			htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+
+	/* Add alignment padding, allocate new packet if needed */
+	if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) {
+		if (PKTHEADROOM(osh, pkt) < pad1) {
+			DHD_INFO(("%s: insufficient headroom %d for %d pad1\n",
+			          __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1));
+			bus->dhd->tx_realloc++;
+			new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
+			if (!new) {
+				DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+				           __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
+				ret = BCME_NOMEM;
+				goto done;
+			}
+
+			PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
+			bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
+			if (free_pkt)
+				PKTFREE(osh, pkt, TRUE);
+			/* the newly allocated pkt is owned locally, so always free it later */
+			free_pkt = TRUE;
+			pkt = new;
+			frame = (uint8*)PKTDATA(osh, pkt);
+			ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
+			pad1 = 0;
+		} else {
+			PKTPUSH(osh, pkt, pad1);
+			frame = (uint8*)PKTDATA(osh, pkt);
+
+			ASSERT((pad1 + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
+			bzero(frame, pad1 + SDPCM_HDRLEN);
+		}
+	}
+	ASSERT(pad1 < DHD_SDALIGN);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	len = (uint16)PKTLEN(osh, pkt);
+	*(uint16*)frame = htol16(len);
+	*(((uint16*)frame) + 1) = htol16(~len);
+
+	/* Software tag: channel, sequence number, data offset */
+	swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+	        (((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+	htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
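+	/* The frame now begins with the 4-byte hardware tag (len and ~len, both
+	 * little-endian) followed by the 32-bit software header encoding channel,
+	 * sequence number and data offset; the next 32-bit word is cleared.
+	 */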
+
+#ifdef DHD_DEBUG
+	tx_packets[PKTPRIO(pkt)]++;
+	if (DHD_BYTES_ON() &&
+	    (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+	      (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+		prhex("Tx Frame", frame, len);
+	} else if (DHD_HDRS_ON()) {
+		prhex("TxHdr", frame, MIN(len, 16));
+	}
+#endif
+
+	/* Raise len to next SDIO block to eliminate tail command */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad2 = bus->blocksize - (len % bus->blocksize);
+		if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
+#ifdef NOTUSED
+			if (pad2 <= PKTTAILROOM(osh, pkt))
+#endif /* NOTUSED */
+				len += pad2;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Some controllers have trouble with odd bytes -- round to even */
+	if (forcealign && (len & (ALIGNMENT - 1))) {
+#ifdef NOTUSED
+		if (PKTTAILROOM(osh, pkt))
+#endif
+			len = ROUNDUP(len, ALIGNMENT);
+#ifdef NOTUSED
+		else
+			DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
+#endif
+	}
+
+	do {
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                          frame, len, pkt, NULL, NULL);
+		bus->f2txdata++;
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+			          __FUNCTION__, ret));
+			bus->tx_sderrs++;
+
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+			                 SFC_WF_TERM, NULL);
+			bus->f1regdata++;
+
+			for (i = 0; i < 3; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+
+		}
+		if (ret == 0) {
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+		}
+	} while ((ret < 0) && retrydata && retries++ < TXRETRIES);
+
+done:
+	/* restore pkt buffer pointer before calling tx complete routine */
+	PKTPULL(osh, pkt, SDPCM_HDRLEN + pad1);
+#ifdef PROP_TXSTATUS
+	if (bus->dhd->wlfc_state) {
+		dhd_os_sdunlock(bus->dhd);
+		dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0);
+		dhd_os_sdlock(bus->dhd);
+	} else {
+#endif /* PROP_TXSTATUS */
+	dhd_txcomplete(bus->dhd, pkt, ret != 0);
+	if (free_pkt)
+		PKTFREE(osh, pkt, TRUE);
+
+#ifdef PROP_TXSTATUS
+	}
+#endif
+	return ret;
+}
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+	int ret = BCME_ERROR;
+	osl_t *osh;
+	uint datalen, prec;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	osh = bus->dhd->osh;
+	datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+	/* Push the test header if doing loopback */
+	if (bus->ext_loop) {
+		uint8* data;
+		PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+		data = PKTDATA(osh, pkt);
+		*data++ = SDPCM_TEST_ECHOREQ;
+		*data++ = (uint8)bus->loopid++;
+		*data++ = (datalen >> 0);
+		*data++ = (datalen >> 8);
+		datalen += SDPCM_TEST_HDRLEN;
+	}
+#endif /* SDTEST */
+
+	/* Add space for the header */
+	PKTPUSH(osh, pkt, SDPCM_HDRLEN);
+	ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+#ifndef DHDTHREAD
+	/* Lock: we're about to use shared data/code (and SDIO) */
+	dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+	/* Check for existing queue, current flow-control, pending event, or pending clock */
+	if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+	    (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+	    (bus->clkstate != CLK_AVAIL)) {
+		DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
+			pktq_len(&bus->txq)));
+		bus->fcqueued++;
+
+		/* Priority based enq */
+		dhd_os_sdlock_txq(bus->dhd);
+		if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
+			PKTPULL(osh, pkt, SDPCM_HDRLEN);
+#ifndef DHDTHREAD
+			/* Release txqlock before releasing sdlock: if this thread kept
+			 * txqlock while dropping and re-taking sdlock, it could deadlock
+			 * against dpc(), which grabs sdlock first and then txqlock.
+			 */
+			dhd_os_sdunlock_txq(bus->dhd);
+			dhd_os_sdunlock(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+			if (bus->dhd->wlfc_state)
+				dhd_wlfc_txcomplete(bus->dhd, pkt, FALSE);
+			else
+#endif
+			dhd_txcomplete(bus->dhd, pkt, FALSE);
+#ifndef DHDTHREAD
+			dhd_os_sdlock(bus->dhd);
+			dhd_os_sdlock_txq(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+			/* let the caller decide whether to free the packet */
+			if (!bus->dhd->wlfc_state)
+#endif
+			PKTFREE(osh, pkt, TRUE);
+			ret = BCME_NORESOURCE;
+		}
+		else
+			ret = BCME_OK;
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
+			dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+
+#ifdef DHD_DEBUG
+		if (pktq_plen(&bus->txq, prec) > qcount[prec])
+			qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+		/* Schedule DPC if needed to send queued packet(s) */
+		if (dhd_deferred_tx && !bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+	} else {
+#ifdef DHDTHREAD
+		/* Lock: we're about to use shared data/code (and SDIO) */
+		dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+		/* Otherwise, send it now */
+		BUS_WAKE(bus);
+		/* Make sure backplane HT clock is on, no pending allowed */
+		dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+#ifndef SDTEST
+		ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+		ret = dhdsdio_txpkt(bus, pkt,
+		        (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+		if (ret)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+#ifdef DHDTHREAD
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+	}
+
+#ifndef DHDTHREAD
+	dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+	return ret;
+}
+
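+/* Dequeue up to 'maxframes' packets from the priority tx queue and transmit them,
+ * skipping precedences that the dongle has flow-controlled. Returns the number of
+ * frames sent.
+ */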
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+	void *pkt;
+	uint32 intstatus = 0;
+	uint retries = 0;
+	int ret = 0, prec_out;
+	uint cnt = 0;
+	uint datalen;
+	uint8 tx_prec_map;
+
+	dhd_pub_t *dhd = bus->dhd;
+	sdpcmd_regs_t *regs = bus->regs;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	tx_prec_map = ~bus->flowcontrol;
+
+	/* Send frames until the limit or some other event */
+	for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
+		dhd_os_sdlock_txq(bus->dhd);
+		if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
+			dhd_os_sdunlock_txq(bus->dhd);
+			break;
+		}
+		dhd_os_sdunlock_txq(bus->dhd);
+		datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
+
+#ifndef SDTEST
+		ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+		ret = dhdsdio_txpkt(bus, pkt,
+		        (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+		if (ret)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr && cnt)
+		{
+			/* Check device status, signal pending interrupt */
+			R_SDREG(intstatus, &regs->intstatus, retries);
+			bus->f2txdata++;
+			if (bcmsdh_regfail(bus->sdh))
+				break;
+			if (intstatus & bus->hostintmask)
+				bus->ipend = TRUE;
+		}
+	}
+
+	/* Deflow-control stack if needed */
+	if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+	    dhd->txoff && (pktq_len(&bus->txq) < FCLOW))
+		dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+
+	return cnt;
+}
+
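+/* Send a control (ioctl) message to the dongle on the control channel. The caller
+ * must leave headroom in front of 'msg' for the bus header and alignment padding.
+ */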
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	uint8 *frame;
+	uint16 len;
+	uint32 swheader;
+	uint retries = 0;
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint8 doff = 0;
+	int ret = -1;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Back up the pointer to make room for the bus header */
+	frame = msg - SDPCM_HDRLEN;
+	len = (msglen += SDPCM_HDRLEN);
+
+	/* Add alignment padding (optional for ctl frames) */
+	if (dhd_alignctl) {
+		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+			frame -= doff;
+			len += doff;
+			msglen += doff;
+			bzero(frame, doff + SDPCM_HDRLEN);
+		}
+		ASSERT(doff < DHD_SDALIGN);
+	}
+	doff += SDPCM_HDRLEN;
+
+	/* Round send length to next SDIO block */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+			len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (len & (ALIGNMENT - 1)))
+		len = ROUNDUP(len, ALIGNMENT);
+
+	ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+	/* Need to lock here to protect txseq and SDIO tx calls */
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	*(uint16*)frame = htol16((uint16)msglen);
+	*(((uint16*)frame) + 1) = htol16(~msglen);
+
+	/* Software tag: channel, sequence number, data offset */
+	swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+	        | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+	htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+	if (!DATAOK(bus)) {
+		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+			__FUNCTION__, bus->tx_max, bus->tx_seq));
+		bus->ctrl_frame_stat = TRUE;
+		/* Send from dpc */
+		bus->ctrl_frame_buf = frame;
+		bus->ctrl_frame_len = len;
+
+		dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+
+		if (bus->ctrl_frame_stat == FALSE) {
+			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+			ret = 0;
+		} else {
+			DHD_ERROR(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__));
+			ret = -1;
+			bus->ctrl_frame_stat = FALSE;
+			goto done;
+		}
+	}
+
+	if (ret == -1) {
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+			prhex("Tx Frame", frame, len);
+		} else if (DHD_HDRS_ON()) {
+			prhex("TxHdr", frame, MIN(len, 16));
+		}
+#endif
+
+		do {
+			ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+			                          frame, len, NULL, NULL, NULL);
+			ASSERT(ret != BCME_PENDING);
+
+			if (ret < 0) {
+				/* On failure, abort the command and terminate the frame */
+				DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+				          __FUNCTION__, ret));
+				bus->tx_sderrs++;
+
+				bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+				                 SFC_WF_TERM, NULL);
+				bus->f1regdata++;
+
+				for (i = 0; i < 3; i++) {
+					uint8 hi, lo;
+					hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					                     SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+					lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					                     SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+					bus->f1regdata += 2;
+					if ((hi == 0) && (lo == 0))
+						break;
+				}
+
+			}
+			if (ret == 0) {
+				bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+			}
+		} while ((ret < 0) && retries++ < TXRETRIES);
+	}
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	if (ret)
+		bus->dhd->tx_ctlerrs++;
+	else
+		bus->dhd->tx_ctlpkts++;
+
+	return ret ? -EIO : 0;
+}
+
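+/* Wait for a control (ioctl) response from the dongle and copy up to 'msglen' bytes
+ * of it into 'msg'. Returns the received length, or a negative error on timeout or
+ * cancellation.
+ */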
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+	dhd_os_sdlock(bus->dhd);
+	rxlen = bus->rxlen;
+	bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+	bus->rxlen = 0;
+	dhd_os_sdunlock(bus->dhd);
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+		         __FUNCTION__, rxlen, msglen));
+	} else if (timeleft == 0) {
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	} else if (pending == TRUE) {
+		DHD_CTL(("%s: canceled\n", __FUNCTION__));
+		return -ERESTARTSYS;
+	} else {
+		DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	}
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	return rxlen ? (int)rxlen : -ETIMEDOUT;
+}
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_POLLRATE,
+	IOV_SDREG,
+	IOV_SBREG,
+	IOV_SDCIS,
+	IOV_MEMBYTES,
+	IOV_MEMSIZE,
+#ifdef DHD_DEBUG
+	IOV_CHECKDIED,
+	IOV_SERIALCONS,
+#endif
+	IOV_DOWNLOAD,
+	IOV_SOCRAM_STATE,
+	IOV_FORCEEVEN,
+	IOV_SDIOD_DRIVE,
+	IOV_READAHEAD,
+	IOV_SDRXCHAIN,
+	IOV_ALIGNCTL,
+	IOV_SDALIGN,
+	IOV_DEVRESET,
+	IOV_CPU,
+#ifdef SDTEST
+	IOV_PKTGEN,
+	IOV_EXTLOOP,
+#endif /* SDTEST */
+	IOV_SPROM,
+	IOV_TXBOUND,
+	IOV_RXBOUND,
+	IOV_TXMINMAX,
+	IOV_IDLETIME,
+	IOV_IDLECLOCK,
+	IOV_SD1IDLE,
+	IOV_SLEEP,
+	IOV_DONGLEISOLATION,
+	IOV_VARS
+#ifdef SOFTAP
+	, IOV_FWPATH
+#endif
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+	{"intr",	IOV_INTR,	0,	IOVT_BOOL,	0 },
+	{"sleep",	IOV_SLEEP,	0,	IOVT_BOOL,	0 },
+	{"pollrate",	IOV_POLLRATE,	0,	IOVT_UINT32,	0 },
+	{"idletime",	IOV_IDLETIME,	0,	IOVT_INT32,	0 },
+	{"idleclock",	IOV_IDLECLOCK,	0,	IOVT_INT32,	0 },
+	{"sd1idle",	IOV_SD1IDLE,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"memsize",	IOV_MEMSIZE,	0,	IOVT_UINT32,	0 },
+	{"download",	IOV_DOWNLOAD,	0,	IOVT_BOOL,	0 },
+	{"socram_state",	IOV_SOCRAM_STATE,	0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"sdiod_drive",	IOV_SDIOD_DRIVE, 0,	IOVT_UINT32,	0 },
+	{"readahead",	IOV_READAHEAD,	0,	IOVT_BOOL,	0 },
+	{"sdrxchain",	IOV_SDRXCHAIN,	0,	IOVT_BOOL,	0 },
+	{"alignctl",	IOV_ALIGNCTL,	0,	IOVT_BOOL,	0 },
+	{"sdalign",	IOV_SDALIGN,	0,	IOVT_BOOL,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"sdreg",	IOV_SDREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sbreg",	IOV_SBREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_cis",	IOV_SDCIS,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"forcealign",	IOV_FORCEEVEN,	0,	IOVT_BOOL,	0 },
+	{"txbound",	IOV_TXBOUND,	0,	IOVT_UINT32,	0 },
+	{"rxbound",	IOV_RXBOUND,	0,	IOVT_UINT32,	0 },
+	{"txminmax",	IOV_TXMINMAX,	0,	IOVT_UINT32,	0 },
+	{"cpu",		IOV_CPU,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"checkdied",	IOV_CHECKDIED,	0,	IOVT_BUFFER,	0 },
+#endif /* DHD_DEBUG  */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+	{"extloop",	IOV_EXTLOOP,	0,	IOVT_BOOL,	0 },
+	{"pktgen",	IOV_PKTGEN,	0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	IOVT_UINT32,	0 },
+#ifdef SOFTAP
+	{"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
+#endif
+	{NULL, 0, 0, 0, 0 }
+};
+
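+/* Print 'desc' followed by the ratio num/div with two decimal places, or "N/A" when
+ * the divisor is zero.
+ */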
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+	uint q1, q2;
+
+	if (!div) {
+		bcm_bprintf(strbuf, "%s N/A", desc);
+	} else {
+		q1 = num / div;
+		q2 = (100 * (num - (q1 * div))) / div;
+		bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+	}
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+	bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+	            bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+	bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
+	            bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+	            bus->rxlen, bus->rx_seq);
+	bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
+	            bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+	bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
+	            bus->pollrate, bus->pollcnt, bus->regfails);
+
+	bcm_bprintf(strbuf, "\nAdditional counters:\n");
+	bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
+	            bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+	            bus->rxc_errors);
+	bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
+	            bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+	bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
+	            bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+	bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
+	            bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+	bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n",
+	            (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+	            bus->f2txdata, bus->f1regdata);
+	{
+		dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+		             bus->dhd->rx_packets);
+		dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+		             (bus->f2txdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+		bcm_bprintf(strbuf, "\n\n");
+	}
+
+#ifdef SDTEST
+	if (bus->pktgen_count) {
+		bcm_bprintf(strbuf, "pktgen config and count:\n");
+		bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d max %d\n",
+		            bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+		            bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+		bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
+		            bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+	}
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+	bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+	            bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+	bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+	bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+	            bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+	bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+	bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+	bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+	bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+	bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
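+/* Copy the current packet-generator configuration and counters out to 'arg'. */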
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+
+	pktgen.version = DHD_PKTGEN_VERSION;
+	pktgen.freq = bus->pktgen_freq;
+	pktgen.count = bus->pktgen_count;
+	pktgen.print = bus->pktgen_print;
+	pktgen.total = bus->pktgen_total;
+	pktgen.minlen = bus->pktgen_minlen;
+	pktgen.maxlen = bus->pktgen_maxlen;
+	pktgen.numsent = bus->pktgen_sent;
+	pktgen.numrcvd = bus->pktgen_rcvd;
+	pktgen.numfail = bus->pktgen_fail;
+	pktgen.mode = bus->pktgen_mode;
+	pktgen.stop = bus->pktgen_stop;
+
+	bcopy(&pktgen, arg, sizeof(pktgen));
+
+	return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+	uint oldcnt, oldmode;
+
+	bcopy(arg, &pktgen, sizeof(pktgen));
+	if (pktgen.version != DHD_PKTGEN_VERSION)
+		return BCME_BADARG;
+
+	oldcnt = bus->pktgen_count;
+	oldmode = bus->pktgen_mode;
+
+	bus->pktgen_freq = pktgen.freq;
+	bus->pktgen_count = pktgen.count;
+	bus->pktgen_print = pktgen.print;
+	bus->pktgen_total = pktgen.total;
+	bus->pktgen_minlen = pktgen.minlen;
+	bus->pktgen_maxlen = pktgen.maxlen;
+	bus->pktgen_mode = pktgen.mode;
+	bus->pktgen_stop = pktgen.stop;
+
+	bus->pktgen_tick = bus->pktgen_ptick = 0;
+	bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+	bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+	/* Clear counts for a new pktgen (mode change, or was stopped) */
+	if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode))
+		bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0;
+
+	return 0;
+}
+#endif /* SDTEST */
+
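+/* Read or write 'size' bytes of dongle memory starting at 'address', moving the
+ * SDIO backplane address window as needed and restoring it when done.
+ */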
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint32 sdaddr;
+	uint dsize;
+
+	/* Determine initial transfer parameters */
+	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+	else
+		dsize = size;
+
+	/* Set the backplane window to include the start address */
+	if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+		DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+		goto xfer_done;
+	}
+
+	/* Do the transfer(s) */
+	while (size) {
+		DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+		          __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+		          (address & SBSDIO_SBWINDOW_MASK)));
+		if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+			DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* Adjust for next transfer (if any) */
+		if ((size -= dsize)) {
+			data += dsize;
+			address += dsize;
+			if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+				DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+				break;
+			}
+			sdaddr = 0;
+			dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+		}
+
+	}
+
+xfer_done:
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+		DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+			bcmsdh_cur_sbwad(bus->sdh)));
+	}
+
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
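+/* Locate and read the dongle's sdpcm_shared structure: the last word of RAM holds
+ * its address, which is validated before the structure is fetched and byte-swapped
+ * to host order.
+ */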
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+	uint32 addr;
+	int rv;
+
+	/* Read last word in memory to determine address of sdpcm_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, bus->ramsize - 4, (uint8 *)&addr, 4)) < 0)
+		return rv;
+
+	addr = ltoh32(addr);
+
+	DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+	/*
+	 * Check if addr is valid.
+	 * NVRAM length at the end of memory should have been overwritten.
+	 */
+	if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+		DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", __FUNCTION__, addr));
+		return BCME_ERROR;
+	}
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+		return rv;
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+		DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+		           "is different than sdpcm_shared version %d in dongle\n",
+		           __FUNCTION__, SDPCM_SHARED_VERSION,
+		           sh->flags & SDPCM_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX	192
+
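+/* Pull any new output from the dongle's console ring buffer and print it to the
+ * host log one complete line at a time, prefixed with "CONSOLE:". Partial lines are
+ * left in the buffer for the next call.
+ */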
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return 0;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+
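+/* Inspect the dongle's shared area for a firmware assert or trap (typically after an
+ * ioctl response timeout) and log the assert expression/file/line or the trap
+ * register dump.
+ */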
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	trap_t tr;
+	sdpcm_shared_t sdpcm_shared;
+	struct bcmstrbuf strbuf;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout; "data" is NULL.
+		 * Allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+	if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (sdpcm_shared.assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (sdpcm_shared.assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+		}
+
+		if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+			if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+			                                 sdpcm_shared.trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x, "
+			"lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n",
+			ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+			ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+			ltoh32(sdpcm_shared.trap_addr),
+			ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+			ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+		}
+	}
+
+	if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+#ifdef DHD_DEBUG
+	if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+		/* Mem dump to a file on device */
+		dhdsdio_mem_dump(bus);
+	}
+#endif /* DHD_DEBUG */
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_mem_dump(dhd_bus_t *bus)
+{
+	int ret = 0;
+	int size; /* Full mem size */
+	int start = 0; /* Start address */
+	int read_size = 0; /* Read size of each iteration */
+	uint8 *buf = NULL, *databuf = NULL;
+
+	/* Get full mem size */
+	size = bus->ramsize;
+	buf = MALLOC(bus->dhd->osh, size);
+	if (!buf) {
+		printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size);
+		return -1;
+	}
+
+	/* Read mem content */
+	printf("Dump dongle memory");
+	databuf = buf;
+	while (size)
+	{
+		read_size = MIN(MEMBLOCK, size);
+		if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size)))
+		{
+			printf("%s: Error membytes %d\n", __FUNCTION__, ret);
+			if (buf) {
+				MFREE(bus->dhd->osh, buf, size);
+			}
+			return -1;
+		}
+		printf(".");
+
+		/* Decrement size and increment start address */
+		size -= read_size;
+		start += read_size;
+		databuf += read_size;
+	}
+	printf("Done\n");
+
+	/* write_to_file() is responsible for freeing buf; do not free it here */
+	if (write_to_file(bus->dhd, buf, bus->ramsize))
+	{
+		printf("%s: Error writing to files\n", __FUNCTION__);
+		return -1;
+	}
+
+	/* buf free handled in write_to_file, not here */
+	return 0;
+}
+#endif /* defined(DHD_DEBUG) */
+
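+/* Replace the cached NVRAM variable buffer with the caller-supplied one. The copy is
+ * written to dongle RAM by dhdsdio_write_vars() when the download completes.
+ */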
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB	(1  << 24)
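+/* Get or set the serial-console enable bit in the PLL chip-control register.
+ * Returns the resulting state, or -1 (with *bcmerror set) on an SDIO register
+ * access failure.
+ */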
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+	int int_val;
+	uint32 addr, data;
+
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+	*bcmerror = 0;
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (!set)
+		return (int_val & CC_PLL_CHIPCTRL_SERIAL_ENAB);
+	if (enable)
+		int_val |= CC_PLL_CHIPCTRL_SERIAL_ENAB;
+	else
+		int_val &= ~CC_PLL_CHIPCTRL_SERIAL_ENAB;
+	bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uint32 chipcontrol;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+		chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+		chipcontrol &= ~0x8;
+		if (enable) {
+			chipcontrol |=  0x8;
+			chipcontrol &= ~0x3;
+		}
+		bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+	}
+
+	return (int_val & CC_PLL_CHIPCTRL_SERIAL_ENAB);
+}
+#endif /* DHD_DEBUG */
+
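+/* Worker for dhd_bus_iovar_op(): performs the get or set for a locally handled
+ * iovar while holding the SDIO lock, waking the bus and requesting the backplane
+ * clock as needed.
+ */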
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+	/* Some ioctls use the bus */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	/* Handle sleep stuff before any clock mucking */
+	if (vi->varid == IOV_SLEEP) {
+		if (IOV_ISSET(actionid)) {
+			bcmerror = dhdsdio_bussleep(bus, bool_val);
+		} else {
+			int_val = (int32)bus->sleeping;
+			bcopy(&int_val, arg, val_size);
+		}
+		goto exit;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	if (!bus->dhd->dongle_reset) {
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	}
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_INTR):
+		int_val = (int32)bus->intr;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_INTR):
+		bus->intr = bool_val;
+		bus->intdis = FALSE;
+		if (bus->dhd->up) {
+			if (bus->intr) {
+				DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+				bcmsdh_intr_enable(bus->sdh);
+			} else {
+				DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+				bcmsdh_intr_disable(bus->sdh);
+			}
+		}
+		break;
+
+	case IOV_GVAL(IOV_POLLRATE):
+		int_val = (int32)bus->pollrate;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POLLRATE):
+		bus->pollrate = (uint)int_val;
+		bus->poll = (bus->pollrate != 0);
+		break;
+
+	case IOV_GVAL(IOV_IDLETIME):
+		int_val = bus->idletime;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLETIME):
+		if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->idletime = int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_IDLECLOCK):
+		int_val = (int32)bus->idleclock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLECLOCK):
+		bus->idleclock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SD1IDLE):
+		int_val = (int32)sd1idle;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SD1IDLE):
+		sd1idle = bool_val;
+		break;
+
+
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+		          (set ? "write" : "read"), size, address));
+
+		/* If we know about SOCRAM, check for a fit */
+		if ((bus->orig_ramsize) &&
+		    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+		{
+			uint8 enable, protect;
+			si_socdevram(bus->sih, FALSE, &enable, &protect);
+			if (!enable || protect) {
+				DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+					__FUNCTION__, bus->orig_ramsize, size, address));
+				DHD_ERROR(("%s: socram enable %d, protect %d\n",
+					__FUNCTION__, enable, protect));
+				bcmerror = BCME_BADARG;
+				break;
+			}
+			if (enable && (bus->sih->chip == BCM4330_CHIP_ID)) {
+				uint32 devramsize = si_socdevram_size(bus->sih);
+				if ((address < SOCDEVRAM_4330_ARM_ADDR) ||
+					(address + size > (SOCDEVRAM_4330_ARM_ADDR + devramsize))) {
+					DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+						__FUNCTION__, address, size));
+					DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+						__FUNCTION__, SOCDEVRAM_4330_ARM_ADDR, devramsize));
+					bcmerror = BCME_BADARG;
+					break;
+				}
+				/* move it such that address is real now */
+				address -= SOCDEVRAM_4330_ARM_ADDR;
+				address += SOCDEVRAM_4330_BP_ADDR;
+				DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+					__FUNCTION__, (set ? "write" : "read"), size, address));
+			}
+		}
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_MEMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_SDIOD_DRIVE):
+		int_val = (int32)dhd_sdiod_drive_strength;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDIOD_DRIVE):
+		dhd_sdiod_drive_strength = int_val;
+		si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+		break;
+
+	case IOV_SVAL(IOV_DOWNLOAD):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_SOCRAM_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdsdio_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_GVAL(IOV_READAHEAD):
+		int_val = (int32)dhd_readahead;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_READAHEAD):
+		if (bool_val && !dhd_readahead)
+			bus->nextlen = 0;
+		dhd_readahead = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDRXCHAIN):
+		int_val = (int32)bus->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDRXCHAIN):
+		if (bool_val && !bus->sd_rxchain)
+			bcmerror = BCME_UNSUPPORTED;
+		else
+			bus->use_rxchain = bool_val;
+		break;
+	case IOV_GVAL(IOV_ALIGNCTL):
+		int_val = (int32)dhd_alignctl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_ALIGNCTL):
+		dhd_alignctl = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDALIGN):
+		int_val = DHD_SDALIGN;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_VARS):
+		if (bus->varsz < (uint)len)
+			bcopy(bus->vars, arg, bus->varsz);
+		else
+			bcmerror = BCME_BUFTOOSHORT;
+		break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uintptr)bus->regs + sd_ptr->offset;
+		size = sd_ptr->func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uintptr)bus->regs + sd_ptr->offset;
+		size = sd_ptr->func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	/* Same as above, but offset is not backplane (not SDIO core) */
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	case IOV_GVAL(IOV_SDCIS):
+	{
+		*(char *)arg = 0;
+
+		bcmstrcat(arg, "\nFunc 0\n");
+		bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 1\n");
+		bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 2\n");
+		bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		break;
+	}
+
+	case IOV_GVAL(IOV_FORCEEVEN):
+		int_val = (int32)forcealign;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FORCEEVEN):
+		forcealign = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_TXBOUND):
+		int_val = (int32)dhd_txbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXBOUND):
+		dhd_txbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_RXBOUND):
+		int_val = (int32)dhd_rxbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RXBOUND):
+		dhd_rxbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_TXMINMAX):
+		int_val = (int32)dhd_txminmax;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXMINMAX):
+		dhd_txminmax = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_SERIALCONS):
+		int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+		if (bcmerror != 0)
+			break;
+
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SERIALCONS):
+		dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+		break;
+
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+	case IOV_GVAL(IOV_EXTLOOP):
+		int_val = (int32)bus->ext_loop;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_EXTLOOP):
+		bus->ext_loop = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_get(bus, arg);
+		break;
+
+	case IOV_SVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_set(bus, arg);
+		break;
+#endif /* SDTEST */
+
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_SVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+		           __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+		           bus->dhd->busstate));
+
+		ASSERT(bus->dhd->osh);
+		/* ASSERT(bus->cl_devid); */
+
+		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+		break;
+#ifdef SOFTAP
+	case IOV_GVAL(IOV_FWPATH):
+	{
+		uint32 fw_path_len;
+
+		fw_path_len = strlen(bus->fw_path);
+
+		DHD_INFO(("[softap] get fwpath, l=%d\n", len));
+
+		if (fw_path_len > len - 1) {
+			bcmerror = BCME_BUFTOOSHORT;
+			break;
+		}
+
+		if (fw_path_len)
+			bcopy(bus->fw_path, arg, fw_path_len);
+
+		((uchar *)arg)[fw_path_len] = 0;
+	}
+	break;
+
+	case IOV_SVAL(IOV_FWPATH):
+	{
+		DHD_INFO(("[softap] set fwpath, idx=%d\n", int_val));
+
+		switch (int_val) {
+		case 1:
+			bus->fw_path = fw_path; /* ordinary one */
+			break;
+		case 2:
+			bus->fw_path = fw_path2;
+			break;
+		default:
+			/* bail out via the common exit path so the SDIO lock is released */
+			bcmerror = BCME_BADARG;
+			goto exit;
+		}
+
+		DHD_INFO(("[softap] new fw path: %s\n", (bus->fw_path[0] ? bus->fw_path : "NULL")));
+	}
+	break;
+#endif
+	case IOV_GVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+		/* Get its status */
+		int_val = (bool) bus->dhd->dongle_reset;
+		bcopy(&int_val, arg, val_size);
+
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == FALSE)
+		dhd_preinit_ioctls((dhd_pub_t *) bus->dhd);
+
+	return bcmerror;
+}
+
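+/* Write the cached NVRAM variables to the top of dongle RAM and store the length
+ * token (vars size in words in the lower 16 bits, its bitwise inverse in the upper
+ * 16 bits) in the last word of RAM.
+ */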
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	if (bus->vars) {
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+			if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+				DHD_ERROR(("PR85623WAR in place\n"));
+				varsize += 4;
+				varaddr -= 4;
+			}
+		}
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+
+		/* Write the vars list */
+		bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray)
+			return BCME_NOMEM;
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+		/* Compare the org NVRAM with the one read from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			__FUNCTION__));
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		bus->orig_ramsize, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((bus->orig_ramsize - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		varsizew = htol32(varsizew);
+	}
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdsdio_membytes(bus, TRUE, (bus->orig_ramsize - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+	uint retries;
+	int bcmerror = 0;
+
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		bus->alp_only = TRUE;
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		si_core_disable(bus->sih, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset SOCRAM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Clear the top bit of memory */
+		if (bus->ramsize) {
+			uint32 zeros = 0;
+			if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4) < 0) {
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+		}
+	} else {
+		if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		if (!si_iscoreup(bus->sih)) {
+			DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		if ((bcmerror = dhdsdio_write_vars(bus))) {
+			DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+			goto fail;
+		}
+
+		if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+		    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+			DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+		W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to SDIOD core */
+	if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+		si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+	return bcmerror;
+}
+
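+/* Dispatch a bus-layer iovar: handled locally if it appears in dhdsdio_iovars[],
+ * otherwise passed through to the underlying bcmsdh layer (with follow-up reads to
+ * track sd_divisor, sd_mode and sd_blocksize changes).
+ */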
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+		dhd_os_sdlock(bus->dhd);
+
+		BUS_WAKE(bus);
+
+		/* Turn on clock in case SD command needs backplane */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+		/* Check for bus configuration changes of interest */
+
+		/* If it was divisor change, read the new one */
+		if (set && strcmp(name, "sd_divisor") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_divisor = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_divisor));
+			}
+		}
+		/* If it was a mode change, read the new one */
+		if (set && strcmp(name, "sd_mode") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_mode = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_mode));
+			}
+		}
+		/* Similar check for blocksize change */
+		if (set && strcmp(name, "sd_blocksize") == 0) {
+			int32 fnum = 2;
+			if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+			                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, "sd_blocksize", bus->blocksize));
+			}
+		}
+		bus->roundup = MIN(max_roundup, bus->blocksize);
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
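+/* Take the bus down: mask and clear dongle interrupts, disable SDIO function 2,
+ * flush the tx queue and any glom state, and wake anyone waiting on a control
+ * response.
+ */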
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;
+	int err;
+	if (!bus->dhd)
+		return;
+
+	osh = bus->dhd->osh;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitlockfree(NULL);
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Change our idea of bus state */
+	bus->dhd->busstate = DHD_BUS_DOWN;
+
+	/* Enable clock for device interrupts */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Disable and clear interrupts at the chip level also */
+	W_SDREG(0, &bus->regs->hostintmask, retries);
+	local_hostintmask = bus->hostintmask;
+	bus->hostintmask = 0;
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+	}
+
+	/* Turn off the bus (F2), free any pending packets */
+	DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	bcmsdh_intr_disable(bus->sdh);
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	/* Clear any pending interrupts now that F2 is disabled */
+	W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+
+	/* Turn off the backplane clock (only) */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	/* Clear the data packet queues */
+	pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
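+/* Bring the bus up: force the HT clock, enable SDIO function 2, wait for the dongle
+ * to report F2 ready, then program the host interrupt mask and enable interrupts.
+ */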
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = 0;
+	uint8 saveclk;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (bus->clkstate != CLK_AVAIL) {
+		DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+		goto exit;
+	}
+
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+	        &bus->regs->tosbmailboxdata, retries);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+	/* Give the dongle some time to do its thing and set IOR2 */
+	dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+	ready = 0;
+	while (ready != enable && !dhd_timeout_expired(&tmo))
+	        ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+
+	DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+	          __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (ready == enable) {
+		/* Make sure we're talking to the core. */
+		if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+			bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+		ASSERT(bus->regs != NULL);
+
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		/* corerev 4 could use the newer interrupt logic to detect the frames */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+			(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+			bus->hostintmask &= ~I_HMB_FRAME_IND;
+			bus->hostintmask |= I_XMTDATA_AVAIL;
+		}
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err);
+
+		/* Set bus state according to enable result */
+		dhdp->busstate = DHD_BUS_DATA;
+
+		/* bcmsdh_intr_unmask(bus->sdh); */
+
+		bus->intdis = FALSE;
+		if (bus->intr) {
+			DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			bcmsdh_intr_enable(bus->sdh);
+		} else {
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+		}
+
+	} else {
+		/* Disable F2 again */
+		enable = SDIO_FUNC_ENABLE_1;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+	}
+
+	/* Restore previous clock setting */
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+	/* If we didn't come up, turn off backplane clock */
+	if (dhdp->busstate != DHD_BUS_DATA)
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+
+	return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+	uint16 lastrbc;
+	uint8 hi, lo;
+	int err;
+
+	DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+	           (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+	if (abort) {
+		bcmsdh_abort(sdh, SDIO_FUNC_2);
+	}
+
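+	/* Terminate the current read frame (SFC_RF_TERM) so that the read frame
+	 * byte count polled below can drain to zero.
+	 */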
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+	bus->f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+		lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+		bus->f1regdata += 2;
+
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+			           __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries) {
+		DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+	} else {
+		DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+	}
+
+	if (rtx) {
+		bus->rxrtx++;
+		W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+		bus->f1regdata++;
+		if (retries <= retry_limit) {
+			bus->rxskip = TRUE;
+		}
+	}
+
+	/* Clear partial in any case */
+	bus->nextlen = 0;
+
+	/* If we can't reach the device, signal failure */
+	if (err || bcmsdh_regfail(sdh))
+		bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint rdlen, pad;
+
+	int sdret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Control data already received in aligned rxctl */
+	if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+		goto gotpkt;
+
+	ASSERT(bus->rxbuf);
+	/* Set rxctl for frame (w/optional alignment) */
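+	/* Advance past the already-read firstread bytes, round up to DHD_SDALIGN,
+	 * then back up so the copied header lands just ahead of the aligned
+	 * remainder of the frame.
+	 */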
+	bus->rxctl = bus->rxbuf;
+	if (dhd_alignctl) {
+		bus->rxctl += firstread;
+		if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+			bus->rxctl += (DHD_SDALIGN - pad);
+		bus->rxctl -= firstread;
+	}
+	ASSERT(bus->rxctl >= bus->rxbuf);
+
+	/* Copy the already-read portion over */
+	bcopy(hdr, bus->rxctl, firstread);
+	if (len <= firstread)
+		goto gotpkt;
+
+	/* Copy the full data pkt in gSPI case and process ioctl. */
+	if (bus->bus == SPI_BUS) {
+		bcopy(hdr, bus->rxctl, len);
+		goto gotpkt;
+	}
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - firstread;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->dhd->maxctl))
+			rdlen += pad;
+	} else if (rdlen % DHD_SDALIGN) {
+		rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (rdlen & (ALIGNMENT - 1)))
+		rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + firstread) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+		           __FUNCTION__, rdlen, bus->dhd->maxctl));
+		bus->dhd->rx_errors++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	if ((len - doff) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+		           __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+		bus->dhd->rx_errors++; bus->rx_toolong++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	/* Read remainder of frame body into the rxctl buffer */
+	sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+	                            (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+	bus->f2rxdata++;
+	ASSERT(sdret != BCME_PENDING);
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+		bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+		dhdsdio_rxfail(bus, TRUE, TRUE);
+		goto done;
+	}
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+	if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+		prhex("RxCtrl", bus->rxctl, len);
+	}
+#endif
+
+	/* Point to valid data and indicate its length */
+	bus->rxctl += doff;
+	bus->rxlen = len - doff;
+
+done:
+	/* Awake any waiters */
+	dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+	uint16 dlen, totlen;
+	uint8 *dptr, num = 0;
+
+	uint16 sublen, check;
+	void *pfirst, *plast, *pnext, *save_pfirst;
+	osl_t *osh = bus->dhd->osh;
+
+	int errcode;
+	uint8 chan, seq, doff, sfdoff;
+	uint8 txmax;
+
+	int ifidx = 0;
+	bool usechain = bus->use_rxchain;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		dhd_os_sdlock_rxq(bus->dhd);
+
+		pfirst = plast = pnext = NULL;
+		dlen = (uint16)PKTLEN(osh, bus->glomd);
+		dptr = PKTDATA(osh, bus->glomd);
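+		/* The glom descriptor payload is an array of little-endian uint16
+		 * subframe lengths, one entry per subframe in the superframe.
+		 */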
+		if (!dlen || (dlen & 1)) {
+			DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+			           __FUNCTION__, dlen));
+			dlen = 0;
+		}
+
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = ltoh16_ua(dptr);
+			dlen -= sizeof(uint16);
+			dptr += sizeof(uint16);
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+				           __FUNCTION__, num, sublen));
+				pnext = NULL;
+				break;
+			}
+			if (sublen % DHD_SDALIGN) {
+				DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+				           __FUNCTION__, sublen, DHD_SDALIGN));
+				usechain = FALSE;
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total is a block multiple */
+			if (!dlen) {
+				sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+				totlen = ROUNDUP(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				           __FUNCTION__, num, sublen));
+				break;
+			}
+			ASSERT(!PKTLINK(pnext));
+			if (!pfirst) {
+				ASSERT(!plast);
+				pfirst = plast = pnext;
+			} else {
+				ASSERT(plast);
+				PKTSETNEXT(osh, plast, pnext);
+				plast = pnext;
+			}
+
+			/* Adhere to start alignment requirements */
+			PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+		}
+
+		/* If all allocations succeeded, save packet chain in bus structure */
+		if (pnext) {
+			DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+			          __FUNCTION__, totlen, num));
+			if (DHD_GLOM_ON() && bus->nextlen) {
+				if (totlen != bus->nextlen) {
+					DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+					          "rxseq %d\n", __FUNCTION__, bus->nextlen,
+					          totlen, rxseq));
+				}
+			}
+			bus->glom = pfirst;
+			pfirst = pnext = NULL;
+		} else {
+			if (pfirst)
+				PKTFREE(osh, pfirst, FALSE);
+			bus->glom = NULL;
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		PKTFREE(osh, bus->glomd, FALSE);
+		bus->glomd = NULL;
+		bus->nextlen = 0;
+
+		dhd_os_sdunlock_rxq(bus->dhd);
+	}
+
+	/* Ok -- either we just generated a packet chain, or had one from before */
+	if (bus->glom) {
+		if (DHD_GLOM_ON()) {
+			DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+			for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
+				          pnext, (uint8*)PKTDATA(osh, pnext),
+				          PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+			}
+		}
+
+		pfirst = bus->glom;
+		dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  Configurable iovar to
+		 * read directly into the chained packet, or allocate a large
+		 * packet and copy into the chain.
+		 */
+		if (usechain) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+			                              dlen, pfirst, NULL, NULL);
+		} else if (bus->dataptr) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, bus->dataptr,
+			                              dlen, NULL, NULL, NULL);
+			sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+			if (sublen != dlen) {
+				DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+				           __FUNCTION__, dlen, sublen));
+				errcode = -1;
+			}
+			pnext = NULL;
+		} else {
+			DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+			errcode = -1;
+		}
+		bus->f2rxdata++;
+		ASSERT(errcode != BCME_PENDING);
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+			           __FUNCTION__, dlen, errcode));
+			bus->dhd->rx_errors++;
+
+			if (bus->glomerr++ < 3) {
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			return 0;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_GLOM_ON()) {
+			prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+			      MIN(PKTLEN(osh, pfirst), 48));
+		}
+#endif
+
+		/* Validate the superframe header */
+		dptr = (uint8 *)PKTDATA(osh, pfirst);
+		sublen = ltoh16_ua(dptr);
+		check = ltoh16_ua(dptr + sizeof(uint16));
+
+		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
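+		/* nextlen is the dongle's read-ahead length hint for the following
+		 * frame; on SDIO it is expressed in 16-byte units (hence the << 4
+		 * when converting to bytes).
+		 */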
+		bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+		errcode = 0;
+		if ((uint16)~(sublen^check)) {
+			DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, sublen, check));
+			errcode = -1;
+		} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+			DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+			           __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+			errcode = -1;
+		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+			DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+			           SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+			errcode = -1;
+		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+			DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+			errcode = -1;
+		} else if ((doff < SDPCM_HDRLEN) ||
+		           (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+			           __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN));
+			errcode = -1;
+		}
+
+		/* Check sequence number of superframe SW header */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+			          __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_seq + 2;
+		}
+		bus->tx_max = txmax;
+
+		/* Remove superframe header, remember offset */
+		PKTPULL(osh, pfirst, doff);
+		sfdoff = doff;
+
+		/* Validate all the subframe headers */
+		for (num = 0, pnext = pfirst; pnext && !errcode;
+		     num++, pnext = PKTNEXT(osh, pnext)) {
+			dptr = (uint8 *)PKTDATA(osh, pnext);
+			dlen = (uint16)PKTLEN(osh, pnext);
+			sublen = ltoh16_ua(dptr);
+			check = ltoh16_ua(dptr + sizeof(uint16));
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				prhex("subframe", dptr, 32);
+			}
+#endif
+
+			if ((uint16)~(sublen^check)) {
+				DHD_ERROR(("%s (subframe %d): HW hdr error: "
+				           "len/check 0x%04x/0x%04x\n",
+				           __FUNCTION__, num, sublen, check));
+				errcode = -1;
+			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+				DHD_ERROR(("%s (subframe %d): length mismatch: "
+				           "len 0x%04x, expect 0x%04x\n",
+				           __FUNCTION__, num, sublen, dlen));
+				errcode = -1;
+			} else if ((chan != SDPCM_DATA_CHANNEL) &&
+			           (chan != SDPCM_EVENT_CHANNEL)) {
+				DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+				           __FUNCTION__, num, chan));
+				errcode = -1;
+			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+				DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+				           __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+				errcode = -1;
+			}
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request a couple retries */
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				PKTPUSH(osh, pfirst, sfdoff);
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			bus->nextlen = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+		save_pfirst = pfirst;
+		bus->glom = NULL;
+		plast = NULL;
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+			pnext = PKTNEXT(osh, pfirst);
+			PKTSETNEXT(osh, pfirst, NULL);
+
+			dptr = (uint8 *)PKTDATA(osh, pfirst);
+			sublen = ltoh16_ua(dptr);
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+			          __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+			          PKTLEN(osh, pfirst), sublen, chan, seq));
+
+			ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+			if (rxseq != seq) {
+				DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Subframe Data", dptr, dlen);
+			}
+#endif
+
+			PKTSETLEN(osh, pfirst, sublen);
+			PKTPULL(osh, pfirst, doff);
+
+			if (PKTLEN(osh, pfirst) == 0) {
+				PKTFREE(bus->dhd->osh, pfirst, FALSE);
+				if (plast) {
+					PKTSETNEXT(osh, plast, pnext);
+				} else {
+					ASSERT(save_pfirst == pfirst);
+					save_pfirst = pnext;
+				}
+				continue;
+			} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst) != 0) {
+				DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+				bus->dhd->rx_errors++;
+				PKTFREE(osh, pfirst, FALSE);
+				if (plast) {
+					PKTSETNEXT(osh, plast, pnext);
+				} else {
+					ASSERT(save_pfirst == pfirst);
+					save_pfirst = pnext;
+				}
+				continue;
+			}
+
+			/* this packet will go up, link back into chain and count it */
+			PKTSETNEXT(osh, pfirst, pnext);
+			plast = pfirst;
+			num++;
+
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+				          __FUNCTION__, num, pfirst,
+				          PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+				          PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+				prhex("", (uint8 *)PKTDATA(osh, pfirst),
+				      MIN(PKTLEN(osh, pfirst), 32));
+			}
+#endif /* DHD_DEBUG */
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+		if (num) {
+			dhd_os_sdunlock(bus->dhd);
+			dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num, 0);
+			dhd_os_sdlock(bus->dhd);
+		}
+
+		bus->rxglomframes++;
+		bus->rxglompkts += num;
+	}
+	return num;
+}
+
+/* Read frames from the dongle; returns the number of frames read and sets
+ * *finished when the dongle indicates no more frames.
+ */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+	osl_t *osh = bus->dhd->osh;
+	bcmsdh_info_t *sdh = bus->sdh;
+
+	uint16 len, check;	/* Extracted hardware header fields */
+	uint8 chan, seq, doff;	/* Extracted software header fields */
+	uint8 fcbits;		/* Extracted fcbits from software header */
+	uint8 delta;
+
+	void *pkt;	/* Packet for event or data frames */
+	uint16 pad;	/* Number of pad bytes to read */
+	uint16 rdlen;	/* Total number of bytes to read */
+	uint8 rxseq;	/* Next sequence number to expect */
+	uint rxleft = 0;	/* Remaining number of frames allowed */
+	int sdret;	/* Return code from bcmsdh calls */
+	uint8 txmax;	/* Maximum tx sequence offered */
+	bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+	uint8 *rxbuf;
+	int ifidx = 0;
+	uint rxcount = 0; /* Total frames read */
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+	bool sdtest = FALSE;	/* To limit message spew from test mode */
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(maxframes);
+
+#ifdef SDTEST
+	/* Allow pktgen to override maxframes */
+	if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+		maxframes = bus->pktgen_count;
+		sdtest = TRUE;
+	}
+#endif
+
+	/* Not finished unless we encounter no more frames indication */
+	*finished = FALSE;
+
+	for (rxseq = bus->rx_seq, rxleft = maxframes;
+	     !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+	     rxseq++, rxleft--) {
+
+		/* Handle glomming separately */
+		if (bus->glom || bus->glomd) {
+			uint8 cnt;
+			DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+			          __FUNCTION__, bus->glomd, bus->glom));
+			cnt = dhdsdio_rxglom(bus, rxseq);
+			DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+			rxseq += cnt - 1;
+			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+			continue;
+		}
+
+		/* Try doing single read if we can */
+		if (dhd_readahead && bus->nextlen) {
+			uint16 nextlen = bus->nextlen;
+			bus->nextlen = 0;
+
+			if (bus->bus == SPI_BUS) {
+				rdlen = len = nextlen;
+			} else {
+				rdlen = len = nextlen << 4;
+
+				/* Pad read to blocksize for efficiency */
+				if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+					pad = bus->blocksize - (rdlen % bus->blocksize);
+					if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+						((rdlen + pad + firstread) < MAX_RX_DATASZ))
+						rdlen += pad;
+				} else if (rdlen % DHD_SDALIGN) {
+					rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+				}
+			}
+
+			/* We use bus->rxctl buffer in WinXP for initial control pkt receives.
+			 * Later we use buffer-poll for data as well as control packets.
+			 * This is required because dhd receives full frame in gSPI unlike SDIO.
+			 * After the frame is received we have to distinguish whether it is data
+			 * or non-data frame.
+			 */
+			/* Allocate a packet buffer */
+			dhd_os_sdlock_rxq(bus->dhd);
+			if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+				if (bus->bus == SPI_BUS) {
+					bus->usebufpool = FALSE;
+					bus->rxctl = bus->rxbuf;
+					if (dhd_alignctl) {
+						bus->rxctl += firstread;
+						if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+							bus->rxctl += (DHD_SDALIGN - pad);
+						bus->rxctl -= firstread;
+					}
+					ASSERT(bus->rxctl >= bus->rxbuf);
+					rxbuf = bus->rxctl;
+					/* Read the entire frame */
+					sdret = dhd_bcmsdh_recv_buf(bus,
+					                            bcmsdh_cur_sbwad(sdh),
+					                            SDIO_FUNC_2,
+					                            F2SYNC, rxbuf, rdlen,
+					                            NULL, NULL, NULL);
+					bus->f2rxdata++;
+					ASSERT(sdret != BCME_PENDING);
+
+					/* Control frame failures need retransmission */
+					if (sdret < 0) {
+						DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+						   __FUNCTION__, rdlen, sdret));
+						/* dhd.rx_ctlerrs is higher level */
+						bus->rxc_errors++;
+						dhd_os_sdunlock_rxq(bus->dhd);
+						dhdsdio_rxfail(bus, TRUE,
+						    (bus->bus == SPI_BUS) ? FALSE : TRUE);
+						continue;
+					}
+				} else {
+					/* Give up on data, request rtx of events */
+					DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+					           "expected rxseq %d\n",
+					           __FUNCTION__, len, rdlen, rxseq));
+					/* Just go try again w/normal header read */
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			} else {
+				if (bus->bus == SPI_BUS)
+					bus->usebufpool = TRUE;
+
+				ASSERT(!PKTLINK(pkt));
+				PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+				rxbuf = (uint8 *)PKTDATA(osh, pkt);
+				/* Read the entire frame */
+				sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+				                            SDIO_FUNC_2,
+				                            F2SYNC, rxbuf, rdlen,
+				                            pkt, NULL, NULL);
+				bus->f2rxdata++;
+				ASSERT(sdret != BCME_PENDING);
+
+				if (sdret < 0) {
+					DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+					   __FUNCTION__, rdlen, sdret));
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+					bus->dhd->rx_errors++;
+					dhd_os_sdunlock_rxq(bus->dhd);
+					/* Force retry w/normal header read.  Don't attempt NAK for
+					 * gSPI
+					 */
+					dhdsdio_rxfail(bus, TRUE,
+					      (bus->bus == SPI_BUS) ? FALSE : TRUE);
+					continue;
+				}
+			}
+			dhd_os_sdunlock_rxq(bus->dhd);
+
+			/* Now check the header */
+			bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+			/* Extract hardware header fields */
+			len = ltoh16_ua(bus->rxhdr);
+			check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+			/* All zeros means readahead info was bad */
+			if (!(len|check)) {
+				DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+				           __FUNCTION__));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate check bytes */
+			if ((uint16)~(len^check)) {
+				DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+				           " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+				           len, check));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rx_badhdr++;
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate frame length */
+			if (len < SDPCM_HDRLEN) {
+				DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+				           __FUNCTION__, len));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Check for consistency with readahead info (TRUE means the
+			 * lengths do not match)
+			 */
+			len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+			if (len_consistent) {
+				/* Mismatch, force retry w/normal header (may be >4K) */
+				DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+				           "expected rxseq %d\n",
+				           __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Extract software header fields */
+			chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+			if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+				DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+				          " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+				          seq));
+				bus->nextlen = 0;
+			}
+
+			bus->dhd->rx_readahead_cnt++;
+			/* Handle Flow Control */
+			fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
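+			/* A bit newly set in fcbits asserts flow control (xoff) for that
+			 * precedence; a bit newly cleared releases it (xon).
+			 */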
+			delta = 0;
+			if (~bus->flowcontrol & fcbits) {
+				bus->fc_xoff++;
+				delta = 1;
+			}
+			if (bus->flowcontrol & ~fcbits) {
+				bus->fc_xon++;
+				delta = 1;
+			}
+
+			if (delta) {
+				bus->fc_rcvd++;
+				bus->flowcontrol = fcbits;
+			}
+
+			/* Check and update sequence number */
+			if (rxseq != seq) {
+				DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+			/* Check window for sanity */
+			if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+				DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+				           __FUNCTION__, txmax, bus->tx_seq));
+				txmax = bus->tx_seq + 2;
+			}
+			bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Data", rxbuf, len);
+			} else if (DHD_HDRS_ON()) {
+				prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+			}
+#endif
+
+			if (chan == SDPCM_CONTROL_CHANNEL) {
+				if (bus->bus == SPI_BUS) {
+					dhdsdio_read_control(bus, rxbuf, len, doff);
+					if (bus->usebufpool) {
+						dhd_os_sdlock_rxq(bus->dhd);
+						PKTFREE(bus->dhd->osh, pkt, FALSE);
+						dhd_os_sdunlock_rxq(bus->dhd);
+					}
+					continue;
+				} else {
+					DHD_ERROR(("%s (nextlen): readahead on control"
+					           " packet %d?\n", __FUNCTION__, seq));
+					/* Force retry w/normal header read */
+					bus->nextlen = 0;
+					dhdsdio_rxfail(bus, FALSE, TRUE);
+					dhd_os_sdlock_rxq(bus->dhd);
+					PKTFREE2();
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			}
+
+			if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+				DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+				           "rx pktbuf's or not yet malloced.\n", len, chan));
+				continue;
+			}
+
+			/* Validate data offset */
+			if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+				DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+				           __FUNCTION__, doff, len, SDPCM_HDRLEN));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				ASSERT(0);
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				continue;
+			}
+
+			/* All done with this one -- now deliver the packet */
+			goto deliver;
+		}
+		/* gSPI frames should not be handled in fractions */
+		if (bus->bus == SPI_BUS) {
+			break;
+		}
+
+		/* Read frame header (hardware and software) */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            bus->rxhdr, firstread, NULL, NULL, NULL);
+		bus->f2rxhdrs++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+			bus->rx_hdrfail++;
+			dhdsdio_rxfail(bus, TRUE, TRUE);
+			continue;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+			prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Extract hardware header fields */
+		len = ltoh16_ua(bus->rxhdr);
+		check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
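+		/* The hardware header is a 16-bit frame length followed by its one's
+		 * complement, so a valid header satisfies (len ^ check) == 0xffff.
+		 */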
+
+		/* All zeros means no more frames */
+		if (!(len|check)) {
+			*finished = TRUE;
+			break;
+		}
+
+		/* Validate check bytes */
+		if ((uint16)~(len^check)) {
+			DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, len, check));
+			bus->rx_badhdr++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Validate frame length */
+		if (len < SDPCM_HDRLEN) {
+			DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+			continue;
+		}
+
+		/* Extract software header fields */
+		chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		/* Validate data offset */
+		if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+			DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+			           __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+			bus->rx_badhdr++;
+			ASSERT(0);
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Save the readahead length if there is one */
+		bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+
+		/* Handle Flow Control */
+		fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		delta = 0;
+		if (~bus->flowcontrol & fcbits) {
+			bus->fc_xoff++;
+			delta = 1;
+		}
+		if (bus->flowcontrol & ~fcbits) {
+			bus->fc_xon++;
+			delta = 1;
+		}
+
+		if (delta) {
+			bus->fc_rcvd++;
+			bus->flowcontrol = fcbits;
+		}
+
+		/* Check and update sequence number */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_seq + 2;
+		}
+		bus->tx_max = txmax;
+
+		/* Call a separate function for control frames */
+		if (chan == SDPCM_CONTROL_CHANNEL) {
+			dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+			continue;
+		}
+
+		ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+		       (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+		/* Length to read */
+		rdlen = (len > firstread) ? (len - firstread) : 0;
+
+		/* May pad read to blocksize for efficiency */
+		if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+			pad = bus->blocksize - (rdlen % bus->blocksize);
+			if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+			    ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+				rdlen += pad;
+		} else if (rdlen % DHD_SDALIGN) {
+			rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+		}
+
+		/* Satisfy length-alignment requirements */
+		if (forcealign && (rdlen & (ALIGNMENT - 1)))
+			rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+		if ((rdlen + firstread) > MAX_RX_DATASZ) {
+			/* Too long -- skip this frame */
+			DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+			bus->dhd->rx_errors++; bus->rx_toolong++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+			/* Give up on data, request rtx of events */
+			DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+			           __FUNCTION__, rdlen, chan));
+			bus->dhd->rx_dropped++;
+			dhd_os_sdunlock_rxq(bus->dhd);
+			dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+			continue;
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		ASSERT(!PKTLINK(pkt));
+
+		/* Leave room for what we already read, and align remainder */
+		ASSERT(firstread < (PKTLEN(osh, pkt)));
+		PKTPULL(osh, pkt, firstread);
+		PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+		/* Read the remaining frame data */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+		bus->f2rxdata++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+			           ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+			            ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+			continue;
+		}
+
+		/* Copy the already-read portion */
+		PKTPUSH(osh, pkt, firstread);
+		bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			prhex("Rx Data", PKTDATA(osh, pkt), len);
+		}
+#endif
+
+deliver:
+		/* Save superframe descriptor and allocate packet frame */
+		if (chan == SDPCM_GLOM_CHANNEL) {
+			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+				DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+				          __FUNCTION__, len));
+#ifdef DHD_DEBUG
+				if (DHD_GLOM_ON()) {
+					prhex("Glom Data", PKTDATA(osh, pkt), len);
+				}
+#endif
+				PKTSETLEN(osh, pkt, len);
+				ASSERT(doff == SDPCM_HDRLEN);
+				PKTPULL(osh, pkt, SDPCM_HDRLEN);
+				bus->glomd = pkt;
+			} else {
+				DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+			}
+			continue;
+		}
+
+		/* Fill in packet len and prio, deliver upward */
+		PKTSETLEN(osh, pkt, len);
+		PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+		/* Test channel packets are processed separately */
+		if (chan == SDPCM_TEST_CHANNEL) {
+			dhdsdio_testrcv(bus, pkt, seq);
+			continue;
+		}
+#endif /* SDTEST */
+
+		if (PKTLEN(osh, pkt) == 0) {
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			continue;
+		} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) {
+			DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			continue;
+		}
+
+		/* Unlock during rx call */
+		dhd_os_sdunlock(bus->dhd);
+		dhd_rx_frame(bus->dhd, ifidx, pkt, 1, chan);
+		dhd_os_sdlock(bus->dhd);
+	}
+	rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+	/* Message if we hit the limit */
+	if (!rxleft && !sdtest)
+		DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+	else
+#endif /* DHD_DEBUG */
+	DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+	/* Back off rxseq if awaiting rtx, update rx_seq */
+	if (bus->rxskip)
+		rxseq--;
+	bus->rx_seq = rxseq;
+
+	return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus = 0;
+	uint32 hmb_data;
+	uint8 fcbits;
+	uint retries = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Read mailbox data and ack that we did so */
+	R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+	if (retries <= retry_limit)
+		W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+	bus->f1regdata += 2;
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+		if (!bus->rxskip) {
+			DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+		}
+		bus->rxskip = FALSE;
+		intstatus |= FRAME_AVAIL_MASK(bus);
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+			           bus->sdpcm_ver, SDPCM_PROT_VERSION));
+		else
+			DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+		/* Make sure corecontrol is set up properly for SDIO_DEVICE_RXDATAINT_MODE_1 */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		    (bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+			uint32 val;
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+			val &= ~CC_XMTDATAAVAIL_MODE;
+			val |= CC_XMTDATAAVAIL_CTRL;
+			W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+		}
+
+#ifdef DHD_DEBUG
+		/* Retrieve console state address now that firmware should have updated it */
+		{
+			sdpcm_shared_t shared;
+			if (dhdsdio_readshared(bus, &shared) == 0)
+				bus->console_addr = shared.console_addr;
+		}
+#endif /* DHD_DEBUG */
+	}
+
+	/*
+	 * Flow Control has been moved into the RX headers and this out-of-band
+	 * method isn't used any more.  Leave it here to remain backward
+	 * compatible with older dongles.
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->fc_xoff++;
+		if (bus->flowcontrol & ~fcbits)
+			bus->fc_xon++;
+
+		bus->fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+	                 HMB_DATA_FWHALT |
+	                 HMB_DATA_NAKHANDLED |
+	                 HMB_DATA_FC |
+	                 HMB_DATA_FWREADY |
+	                 HMB_DATA_FCDATA_MASK |
+	                 HMB_DATA_VERSION_MASK)) {
+		DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+	}
+
+	return intstatus;
+}
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus, newstatus = 0;
+	uint retries = 0;
+	uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+	uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+	uint framecnt = 0;		  /* Temporary counter of tx/rx frames */
+	bool rxdone = TRUE;		  /* Flag for no more read data */
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Start with leftover status bits */
+	intstatus = bus->intstatus;
+
+	dhd_os_sdlock(bus->dhd);
+
+	/* If waiting for HTAVAIL, check status */
+	if (bus->clkstate == CLK_PENDING) {
+		int err;
+		uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+		/* Check for inconsistent device control */
+		devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		} else {
+			ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+		}
+#endif /* DHD_DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+		if (SBSDIO_HTAV(clkctl)) {
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			if (err) {
+				DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			bus->clkstate = CLK_AVAIL;
+		} else {
+			goto clkwait;
+		}
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+	if (bus->clkstate != CLK_AVAIL)
+		goto clkwait;
+
+	/* Pending interrupt indicates new device status */
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata++;
+		if (bcmsdh_regfail(bus->sdh))
+			newstatus = 0;
+		newstatus &= bus->hostintmask;
+		bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+		if (newstatus) {
+			bus->f1regdata++;
+			if (!((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+			      (newstatus == I_XMTDATA_AVAIL)))
+				W_SDREG(newstatus, &regs->intstatus, retries);
+		}
+	}
+
+	/* Merge new bits with previous */
+	intstatus |= newstatus;
+	bus->intstatus = 0;
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata += 2;
+		bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Just being here means nothing more to do for chipactive */
+	if (intstatus & I_CHIPACTIVE) {
+		/* ASSERT(bus->clkstate == CLK_AVAIL); */
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus);
+	}
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		DHD_ERROR(("Dongle reports SBINT\n"));
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	if (intstatus & I_CHIPACTIVE) {
+		DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
+	if (bus->rxskip) {
+		intstatus &= ~FRAME_AVAIL_MASK(bus);
+	}
+
+	/* On frame indication, read available frames */
+	if (PKT_AVAILABLE(bus, intstatus)) {
+		framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+		if (rxdone || bus->rxskip)
+			intstatus  &= ~FRAME_AVAIL_MASK(bus);
+		rxlimit -= MIN(framecnt, rxlimit);
+	}
+
+	/* Keep still-pending events for next scheduling */
+	bus->intstatus = intstatus;
+
+clkwait:
+	/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+	 * or clock availability.  (Allows tx loop to check ipend if desired.)
+	 * (Unless register access seems hosed, as we may not be able to ACK...)
+	 */
+	if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+		DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+		          __FUNCTION__, rxdone, framecnt));
+		bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+		bcmsdh_intr_enable(sdh);
+	}
+
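+	/* A control frame left pending by the control-transmit path (ctrl_frame_stat)
+	 * is sent here once the clock is available and data transfers are allowed
+	 * (DATAOK); the waiting sender is then woken up.
+	 */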
+	if (DATAOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))  {
+		int ret, i;
+
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                      (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+		                      NULL, NULL, NULL);
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+			          __FUNCTION__, ret));
+			bus->tx_sderrs++;
+
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+			                 SFC_WF_TERM, NULL);
+			bus->f1regdata++;
+
+			for (i = 0; i < 3; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+		}
+		if (ret == 0) {
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+		}
+
+		bus->ctrl_frame_stat = FALSE;
+		dhd_wait_event_wakeup(bus->dhd);
+	}
+	/* Send queued frames (limit 1 if rx may still be pending) */
+	else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+	    pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+		framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+		framecnt = dhdsdio_sendfromq(bus, framecnt);
+		txlimit -= framecnt;
+	}
+
+	/* Resched if events or tx frames are pending, else await next interrupt */
+	/* On failed register access, all bets are off: no resched or interrupts */
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+		DHD_ERROR(("%s: failed backplane access over SDIO, halting operation: %d\n",
+		           __FUNCTION__, bcmsdh_regfail(sdh)));
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->intstatus = 0;
+	} else if (bus->clkstate == CLK_PENDING) {
+		/* Awaiting I_CHIPACTIVE; don't resched */
+	} else if (bus->intstatus || bus->ipend ||
+	           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+			PKT_AVAILABLE(bus, bus->intstatus)) {  /* Read multiple frames */
+		resched = TRUE;
+	}
+
+	bus->dpc_sched = resched;
+
+	/* If we're done for now, turn off clock request. */
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+	return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched;
+
+	/* Call the DPC directly. */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	resched = dhdsdio_dpc(bus);
+
+	return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus) {
+		DHD_ERROR(("%s: bus is a null pointer, exiting\n", __FUNCTION__));
+		return;
+	}
+	sdh = bus->sdh;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is down, nothing to do\n", __FUNCTION__));
+		return;
+	}
+
+	/* Count the interrupt call */
+	bus->intrcount++;
+	bus->ipend = TRUE;
+
+	/* Shouldn't get this interrupt if we're sleeping? */
+	if (bus->sleeping) {
+		DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+		return;
+	}
+
+	/* Disable additional interrupts (is this needed now)? */
+	if (bus->intr) {
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	} else {
+		DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+	}
+
+	bcmsdh_intr_disable(sdh);
+	bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	while (dhdsdio_dpc(bus));
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+	bus->dpc_sched = TRUE;
+	dhd_sched_dpc(bus->dhd);
+#endif /* SDIO_ISR_THREAD */
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+	/* Default to specified length, or full range */
+	if (dhd_pktgen_len) {
+		bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+		bus->pktgen_minlen = bus->pktgen_maxlen;
+	} else {
+		bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+		bus->pktgen_minlen = 0;
+	}
+	bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+	/* Default to per-watchdog burst with 10s print time */
+	bus->pktgen_freq = 1;
+	bus->pktgen_print = 10000 / dhd_watchdog_ms;
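+	/* Scale the dhd_pktgen parameter by the watchdog interval, rounding up, to
+	 * get the number of packets generated per watchdog tick.
+	 */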
+	bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+	/* Default to echo mode */
+	bus->pktgen_mode = DHD_PKTGEN_ECHO;
+	bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+	void *pkt;
+	uint8 *data;
+	uint pktcount;
+	uint fillbyte;
+	osl_t *osh = bus->dhd->osh;
+	uint16 len;
+
+	/* Display current count if appropriate */
+	if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+		bus->pktgen_ptick = 0;
+		printf("%s: send attempts %d rcvd %d\n",
+		       __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd);
+	}
+
+	/* For recv mode, just make sure dongle has started sending */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+			dhdsdio_sdtest_set(bus, (uint8)bus->pktgen_total);
+		}
+		return;
+	}
+
+	/* Otherwise, generate or request the specified number of packets */
+	for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+		/* Stop if total has been reached */
+		if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			break;
+		}
+
+		/* Allocate an appropriate-sized packet */
+		len = bus->pktgen_len;
+		if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {
+			DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+			break;
+		}
+		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+		data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+		/* Write test header cmd and extra based on mode */
+		switch (bus->pktgen_mode) {
+		case DHD_PKTGEN_ECHO:
+			*data++ = SDPCM_TEST_ECHOREQ;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_SEND:
+			*data++ = SDPCM_TEST_DISCARD;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_RXBURST:
+			*data++ = SDPCM_TEST_BURST;
+			*data++ = (uint8)bus->pktgen_count;
+			break;
+
+		default:
+			DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+			PKTFREE(osh, pkt, TRUE);
+			bus->pktgen_count = 0;
+			return;
+		}
+
+		/* Write test header length field */
+		*data++ = (len >> 0);
+		*data++ = (len >> 8);
+
+		/* Then fill in the remainder -- N/A for burst, but who cares... */
+		for (fillbyte = 0; fillbyte < len; fillbyte++)
+			*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+			prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Send it */
+		if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) {
+			bus->pktgen_fail++;
+			if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+				bus->pktgen_count = 0;
+		}
+		bus->pktgen_sent++;
+
+		/* Bump length if not fixed, wrap at max */
+		if (++bus->pktgen_len > bus->pktgen_maxlen)
+			bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+		/* Special case for burst mode: just send one request! */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+			break;
+	}
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count)
+{
+	void *pkt;
+	uint8 *data;
+	osl_t *osh = bus->dhd->osh;
+
+	/* Allocate the packet */
+	if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) {
+		DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+		return;
+	}
+	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+	data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+	/* Fill in the test header */
+	*data++ = SDPCM_TEST_SEND;
+	*data++ = count;
+	*data++ = (bus->pktgen_maxlen >> 0);
+	*data++ = (bus->pktgen_maxlen >> 8);
+
+	/* Send it */
+	if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE))
+		bus->pktgen_fail++;
+}
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint8 *data;
+	uint pktlen;
+
+	uint8 cmd;
+	uint8 extra;
+	uint16 len;
+	uint16 offset;
+
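+	/* SDPCM test frames carry a small header: 1-byte cmd, 1-byte extra, then a
+	 * 16-bit little-endian payload length.
+	 */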
+	/* Check for min length */
+	if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_testrcv: toss runt frame, pktlen %d\n", pktlen));
+		PKTFREE(osh, pkt, FALSE);
+		return;
+	}
+
+	/* Extract header fields */
+	data = PKTDATA(osh, pkt);
+	cmd = *data++;
+	extra = *data++;
+	len = *data++; len += *data++ << 8;
+	DHD_TRACE(("%s: cmd %d, extra %d, len %d\n", __FUNCTION__, cmd, extra, len));
+	/* Check length for relevant commands */
+	if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+		if (pktlen != len + SDPCM_TEST_HDRLEN) {
+			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+			           " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+			PKTFREE(osh, pkt, FALSE);
+			return;
+		}
+	}
+
+	/* Process as per command */
+	switch (cmd) {
+	case SDPCM_TEST_ECHOREQ:
+		/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+		*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+		if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) {
+			bus->pktgen_sent++;
+		} else {
+			bus->pktgen_fail++;
+			PKTFREE(osh, pkt, FALSE);
+		}
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_ECHORSP:
+		if (bus->ext_loop) {
+			PKTFREE(osh, pkt, FALSE);
+			bus->pktgen_rcvd++;
+			break;
+		}
+
+		for (offset = 0; offset < len; offset++, data++) {
+			if (*data != SDPCM_TEST_FILL(offset, extra)) {
+				DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+				           "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+				           offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+				break;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_DISCARD:
+		{
+			int i = 0;
+			uint8 *prn = data;
+			uint8 testval = extra;
+			for (i = 0; i < len; i++) {
+				if (*prn != testval) {
+					DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+						i, bus->pktgen_rcvd_rcvsession, testval, *prn));
+					prn++; testval++;
+				}
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_BURST:
+	case SDPCM_TEST_SEND:
+	default:
+		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+		          " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+		PKTFREE(osh, pkt, FALSE);
+		break;
+	}
+
+	/* For recv mode, stop at limit (and tell dongle to stop sending) */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcvd_rcvsession++;
+
+			if (bus->pktgen_total &&
+			    (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+				bus->pktgen_count = 0;
+				DHD_ERROR(("Pktgen:rcv test complete!\n"));
+				bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+				dhdsdio_sdtest_set(bus, FALSE);
+				bus->pktgen_rcvd_rcvsession = 0;
+			}
+		}
+	}
+}
+#endif /* SDTEST */
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus;
+
+	DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+	bus = dhdp->bus;
+
+	if (bus->dhd->dongle_reset)
+		return FALSE;
+
+	/* Ignore the timer if simulating bus down */
+	if (bus->sleeping)
+		return FALSE;
+
+	if (dhdp->busstate == DHD_BUS_DOWN)
+		return FALSE;
+
+	/* Poll period: check device if appropriate. */
+	if (bus->poll && (++bus->polltick >= bus->pollrate)) {
+		uint32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+			if (!bus->dpc_sched) {
+				uint8 devpend;
+				devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+				                          SDIOD_CCCR_INTPEND, NULL);
+				intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and schedule the DPC */
+			if (intstatus) {
+				bus->pollcnt++;
+				bus->ipend = TRUE;
+				if (bus->intr) {
+					bcmsdh_intr_disable(bus->sdh);
+				}
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->lastintrs = bus->intrcount;
+	}
+
+#ifdef DHD_DEBUG
+	/* Poll for console output periodically */
+	if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+			if (dhdsdio_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	/* Generate packets if configured */
+	if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+		/* Make sure backplane clock is on */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		bus->pktgen_tick = 0;
+		dhdsdio_pktgen(bus);
+	}
+#endif
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+		if (++bus->idlecount >= bus->idletime) {
+			bus->idlecount = 0;
+			if (bus->activity) {
+				bus->activity = FALSE;
+				dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+			}
+		}
+	}
+
+	return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	uint32 addr, val;
+	int rv;
+	void *pkt;
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Exclusive bus access */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	BUS_WAKE(bus);
+	/* No pend allowed since txpkt is called later, ht clk has to be on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Bump dongle by sending an empty packet on the event channel.
+	 * sdpcm_sendup (RX) checks for virtual console input.
+	 */
+	if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+		dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE);
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rv;
+}
+#endif /* DHD_DEBUG */
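As an aside on the routine above: dhd_bus_console_in() injects one line of console input by clearing cbuf_idx, copying the message into cbuf, publishing its length in vcons_in, and then nudging the dongle with an empty event-channel packet. A minimal, hypothetical caller (the wrapper below is an illustration only; just dhd_bus_console_in() comes from this patch) could look like:

/* Hypothetical debug-path caller (not part of this patch): forward a
 * single console command line to the dongle.
 */
static int example_send_console_cmd(dhd_pub_t *dhdp, const char *cmd)
{
	/* The dongle treats the buffer as one line of console input. */
	return dhd_bus_console_in(dhdp, (uchar *)cmd, (uint)strlen(cmd));
}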
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+	uint byte, tag, tdata;
+	DHD_INFO(("Function %d CIS:\n", fn));
+
+	for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+		if ((byte % 16) == 0)
+			DHD_INFO(("    "));
+		DHD_INFO(("%02x ", cis[byte]));
+		if ((byte % 16) == 15)
+			DHD_INFO(("\n"));
+		if (!tdata--) {
+			tag = cis[byte];
+			if (tag == 0xff)
+				break;
+			else if (!tag)
+				tdata = 0;
+			else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+				tdata = cis[byte + 1] + 1;
+			else
+				DHD_INFO(("]"));
+		}
+	}
+	if ((byte % 16) != 15)
+		DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+	if (chipid == BCM4325_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4329_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4315_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4319_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4336_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4330_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43237_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43362_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43239_CHIP_ID)
+		return TRUE;
+	return FALSE;
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+	uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+	int ret;
+	dhd_bus_t *bus;
+	dhd_cmn_t *cmn;
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_txbound = DHD_TXBOUND;
+	dhd_rxbound = DHD_RXBOUND;
+	dhd_alignctl = TRUE;
+	sd1idle = TRUE;
+	dhd_readahead = TRUE;
+	retrydata = FALSE;
+	dhd_doflow = TRUE;
+	dhd_dongle_memsize = 0;
+	dhd_txminmax = DHD_TXMINMAX;
+
+	forcealign = TRUE;
+
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+	/* We make assumptions about address window mappings */
+	ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+	/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+	 * means early parse could fail, so here we should get either an ID
+	 * we recognize OR (-1) indicating we must request power first.
+	 */
+	/* Check the Vendor ID */
+	switch (venid) {
+		case 0x0000:
+		case VENDOR_BROADCOM:
+			break;
+		default:
+			DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+			           __FUNCTION__, venid));
+			return NULL;
+	}
+
+	/* Check the Device ID and make sure it's one that we support */
+	switch (devid) {
+		case BCM4325_D11DUAL_ID:		/* 4325 802.11a/g id */
+		case BCM4325_D11G_ID:			/* 4325 802.11g 2.4GHz band id */
+		case BCM4325_D11A_ID:			/* 4325 802.11a 5GHz band id */
+			DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4329_D11N_ID:		/* 4329 802.11n dualband device */
+		case BCM4329_D11N2G_ID:		/* 4329 802.11n 2.4G device */
+		case BCM4329_D11N5G_ID:		/* 4329 802.11n 5G device */
+		case 0x4329:
+			DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4315_D11DUAL_ID:		/* 4315 802.11a/g id */
+		case BCM4315_D11G_ID:			/* 4315 802.11g id */
+		case BCM4315_D11A_ID:			/* 4315 802.11a id */
+			DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4319_D11N_ID:			/* 4319 802.11n id */
+		case BCM4319_D11N2G_ID:			/* 4319 802.11n2g id */
+		case BCM4319_D11N5G_ID:			/* 4319 802.11n5g id */
+			DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+			break;
+		case 0:
+			DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+			          __FUNCTION__));
+			break;
+
+		default:
+			DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+			           __FUNCTION__, venid, devid));
+			return NULL;
+	}
+
+	if (osh == NULL) {
+		/* Ask the OS interface part for an OSL handle */
+		if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
+			DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
+			return NULL;
+		}
+	}
+
+	/* Allocate private bus interface state */
+	if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+		DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	bzero(bus, sizeof(dhd_bus_t));
+	bus->sdh = sdh;
+	bus->cl_devid = (uint16)devid;
+	bus->bus = DHD_BUS;
+	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+	bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+	/* attach the common module */
+	if (!(cmn = dhd_common_init(osh))) {
+		DHD_ERROR(("%s: dhd_common_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* attempt to attach to the dongle */
+	if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+		DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+		dhd_common_deinit(NULL, cmn);
+		goto fail;
+	}
+
+	/* Attach to the dhd/OS/network interface */
+	if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	bus->dhd->cmn = cmn;
+	cmn->dhd = bus->dhd;
+
+	/* Allocate buffers */
+	if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Register interrupt callback, but mask it (not operational yet). */
+	DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+	bcmsdh_intr_disable(sdh);
+	if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+		DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+		           __FUNCTION__, ret));
+		goto fail;
+	}
+	DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+
+	DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	/* Read the MAC address from an external, customer-provided source */
+	memset(&ea_addr, 0, sizeof(ea_addr));
+	ret = dhd_custom_get_mac_address(ea_addr.octet);
+	if (!ret) {
+		memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	/* if firmware path present try to download and bring up bus */
+	if (dhd_download_fw_on_driverload && (ret = dhd_bus_start(bus->dhd)) != 0) {
+		DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+		if (ret == BCME_NOTUP)
+			goto fail;
+	}
+	/* Ok, have the per-port tell the stack we're open for business */
+	if (dhd_net_attach(bus->dhd, 0) != 0) {
+		DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	return bus;
+
+fail:
+	dhdsdio_release(bus, osh);
+	return NULL;
+}
+
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+                     uint16 devid)
+{
+	int err = 0;
+	uint8 clkctl = 0;
+
+	bus->alp_only = TRUE;
+
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+		DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+	}
+
+#ifdef DHD_DEBUG
+	DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+	       bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+
+#endif /* DHD_DEBUG */
+
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+	if (!err)
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+		DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+		           err, DHD_INIT_CLKCTL1, clkctl));
+		goto fail;
+	}
+
+
+#ifdef DHD_DEBUG
+	if (DHD_INFO_ON()) {
+		uint fn, numfn;
+		uint8 *cis[SDIOD_MAX_IOFUNCS];
+		int err = 0;
+
+		numfn = bcmsdh_query_iofnum(sdh);
+		ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+		/* Make sure ALP is available before trying to read CIS */
+		SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+		                                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+		          !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+		/* Now request ALP be put on the bus */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 DHD_INIT_CLKCTL2, &err);
+		OSL_DELAY(65);
+
+		for (fn = 0; fn <= numfn; fn++) {
+			if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+				break;
+			}
+			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+			if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+				MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+				break;
+			}
+			dhd_dump_cis(fn, cis[fn]);
+		}
+
+		while (fn-- > 0) {
+			ASSERT(cis[fn]);
+			MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+		}
+
+		if (err) {
+			DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+			goto fail;
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+	if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+		DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+		           __FUNCTION__, bus->sih->chip));
+		goto fail;
+	}
+
+
+	si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+	/* Get info on the ARM and SOCRAM cores... */
+	if (!DHD_NOPMU(bus)) {
+		if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			bus->armrev = si_corerev(bus->sih);
+		} else {
+			DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+			goto fail;
+		}
+		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+		bus->ramsize = bus->orig_ramsize;
+		if (dhd_dongle_memsize)
+			dhd_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+		DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
+			bus->ramsize, bus->orig_ramsize));
+	}
+
+	/* ...but normally deal with the SDPCMDEV core */
+	if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+	    !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+		DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->sdpcmrev = si_corerev(bus->sih);
+
+	/* Set core control so an SDIO reset does a backplane reset */
+	OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+	bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+	if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		(bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1))
+	{
+		uint32 val;
+
+		val = R_REG(osh, &bus->regs->corecontrol);
+		val &= ~CC_XMTDATAAVAIL_MODE;
+		val |= CC_XMTDATAAVAIL_CTRL;
+		W_REG(osh, &bus->regs->corecontrol, val);
+	}
+
+
+	pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	return TRUE;
+
+fail:
+	if (bus->sih != NULL)
+		si_detach(bus->sih);
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already allocated above */
+		if (bus->rxblen)
+			DHD_OS_PREFREE(osh, bus->rxbuf, bus->rxblen);
+		goto fail;
+	}
+
+	/* Align the buffer */
+	if ((uintptr)bus->databuf % DHD_SDALIGN)
+		bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+	else
+		bus->dataptr = bus->databuf;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	int32 fnum;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef SDTEST
+	dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	bus->sleeping = FALSE;
+	bus->rxflow = FALSE;
+	bus->prev_rxlim_hit = 0;
+
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = (int32)dhd_idletime;
+	bus->idleclock = DHD_IDLE_ACTIVE;
+
+	/* Query the SD clock speed */
+	if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+	                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+		bus->sd_divisor = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_divisor", bus->sd_divisor));
+	}
+
+	/* Query the SD bus mode */
+	if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+	                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+		bus->sd_mode = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_mode", bus->sd_mode));
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	fnum = 2;
+	if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+	                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+		bus->blocksize = 0;
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_blocksize", bus->blocksize));
+	}
+	bus->roundup = MIN(max_roundup, bus->blocksize);
+
+	/* Query if bus module supports packet chaining, default to use if supported */
+	if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+	                    &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+		bus->sd_rxchain = FALSE;
+	} else {
+		DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+		          __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+	}
+	bus->use_rxchain = (bool)bus->sd_rxchain;
+
+	return TRUE;
+}
+
+bool
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *pfw_path, char *pnv_path)
+{
+	bool ret;
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+
+	ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+
+	return ret;
+}
+
+static bool
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	bool ret;
+
+	/* Download the firmware */
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	ret = _dhdsdio_download_firmware(bus) == 0;
+
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+	bool dongle_isolation = FALSE;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(osh);
+
+		/* De-register interrupt handler */
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_intr_dereg(bus->sdh);
+
+		if (bus->dhd) {
+			dhd_common_deinit(bus->dhd, NULL);
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhd_detach(bus->dhd);
+			dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+
+	if (osh)
+		dhd_osl_detach(osh);
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->rxbuf) {
+#ifndef DHD_USE_STATIC_BUF
+		MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+		bus->rxctl = bus->rxbuf = NULL;
+		bus->rxlen = 0;
+	}
+
+	if (bus->databuf) {
+#ifndef DHD_USE_STATIC_BUF
+		MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+		bus->databuf = NULL;
+	}
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+		return;
+
+	if (bus->sih) {
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		}
+#if !defined(BCMLXSDMMC)
+		if (dongle_isolation == FALSE)
+			si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+		si_detach(bus->sih);
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(bus->dhd);
+		dhdsdio_release(bus, bus->dhd->osh);
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+	dhdsdio_probe,
+	dhdsdio_disconnect
+};
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_unregister();
+}
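dhd_bus_register()/dhd_bus_unregister() are the hooks the OS-specific entry point is expected to call. A hypothetical module-init sketch (illustration only; the real entry points live in the OS layer, e.g. dhd_linux.c, and are not in this hunk):

/* Illustration only, not part of this patch. */
static int __init example_dhd_module_init(void)
{
	/* Hands dhd_sdio's probe/disconnect callbacks to bcmsdh, which then
	 * waits for (or finds) the SDIO device.
	 */
	return dhd_bus_register();
}

static void __exit example_dhd_module_exit(void)
{
	dhd_bus_unregister();
}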
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *ularray = NULL;
+
+	DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+	/* Download image */
+	while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), MEMBLOCK);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+	if (offset < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+			goto err;
+		}
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+		if (ularray == NULL) {
+			DHD_ERROR(("%s: ularray malloc failed\n", __FUNCTION__));
+			bcmerror = BCME_NOMEM;
+			goto err;
+		}
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	if (ularray)
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	uint len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL)
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+/*
+	EXAMPLE: nvram_array
+	nvram_array format:
+	name=value
+	Terminate each assignment with a newline, and end the array with an
+	empty string (a lone newline).
+
+	For example:
+	unsigned char nvram_array[] = "name1=value1\n" "name2=value2\n" "\n";
+	Hex values start with 0x, and MAC addresses use the format
+	xx:xx:xx:xx:xx:xx.
+
+	Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+	bus->nvram_params = nvram_params;
+}
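A self-contained sketch of the format described above (names and values are invented for illustration):

/* Illustration only: an embedded NVRAM blob in "name=value\n" form,
 * terminated by a line holding just "\n".
 */
static unsigned char example_nvram_array[] =
	"boardtype=0x0598\n"
	"macaddr=00:90:4c:c5:12:38\n"
	"\n";

/* From platform glue, before firmware download:
 *	dhd_bus_set_nvram_params(bus, (const char *)example_nvram_array);
 */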
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+	}
+	else {
+		len = strlen(bus->nvram_params);
+		ASSERT(len <= MAX_NVRAMBUF_SIZE);
+		memcpy(memblock, bus->nvram_params, len);
+	}
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+		len = process_nvram_vars(bufp, len);
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_SDIO_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdsdio_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdsdio_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+	/* External nvram takes precedence if specified */
+	if (dhdsdio_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+	}
+
+	/* Take arm out of reset */
+	if (dhdsdio_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	int status;
+
+	status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+
+	return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle));
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+	return SDPCM_HDRLEN;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+
+	if (flag == TRUE) {
+		if (!bus->dhd->dongle_reset) {
+			dhd_os_sdlock(dhdp);
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop comes before ifconfig_down */
+			dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			/* Expect app to have torn down any connection before calling */
+			/* Stop the bus, disable F2 */
+			dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+			/* Clean up any pending IRQ */
+			bcmsdh_set_irq(FALSE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+			/* Clean tx/rx buffer pointers, detach from the dongle */
+			dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+			bus->dhd->dongle_reset = TRUE;
+			bus->dhd->up = FALSE;
+			dhd_os_sdunlock(dhdp);
+
+			DHD_TRACE(("%s:  WLAN OFF DONE\n", __FUNCTION__));
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_SDIO_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+
+		DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+		if (bus->dhd->dongle_reset) {
+			/* Turn on WLAN */
+			dhd_os_sdlock(dhdp);
+
+			/* Reset SD client */
+			bcmsdh_reset(bus->sdh);
+
+			/* Attempt to re-attach & download */
+			if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+			                        (uint32 *)SI_ENUM_BASE,
+			                        bus->cl_devid)) {
+				/* Attempt to download binary to the dongle */
+				if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+					dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
+
+					/* Re-init bus, enable F2 transfer */
+					bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+					if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+						bcmsdh_set_irq(TRUE);
+						dhd_enable_oob_intr(bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+					bus->dhd->dongle_reset = FALSE;
+					bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+					/* Restore flow control  */
+					dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif 
+					dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+					DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+					} else {
+						dhd_bus_stop(bus, FALSE);
+						dhdsdio_release_dongle(bus, bus->dhd->osh,
+							TRUE, FALSE);
+					}
+				} else
+					bcmerror = BCME_SDIO_ERROR;
+			} else
+				bcmerror = BCME_SDIO_ERROR;
+
+			dhd_os_sdunlock(dhdp);
+		} else {
+			DHD_ERROR(("%s: Set DEVRESET=FALSE invoked when device is on\n",
+				__FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+		}
+	}
+	return bcmerror;
+}
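dhd_bus_devreset() is the off/on switch used by the platform's power-control path. A hedged sketch of a hypothetical caller (the helper name and the power-gating step are assumptions, not part of this patch):

/* Hypothetical power-toggle path (illustration only; the real callers are
 * in the OS-specific layer): turn the WLAN core off, let the platform cut
 * and restore power, then bring it back up.
 */
static int example_wifi_power_cycle(dhd_pub_t *dhdp)
{
	int err;

	err = dhd_bus_devreset(dhdp, TRUE);	/* WLAN OFF: stop bus, release dongle */
	if (err)
		return err;

	/* ... platform removes and restores power to the chip here ... */

	return dhd_bus_devreset(dhdp, FALSE);	/* WLAN ON: re-attach, re-download */
}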
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+	return dhdsdio_membytes(bus, set, address, data, size);
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
new file mode 100644
index 0000000..53db62c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -0,0 +1,248 @@
+/*
+* Copyright (C) 1999-2011, Broadcom Corporation
+* 
+*         Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: dhd_wlfc.h,v 1.1.8.1 2010-09-09 22:41:08 Exp $
+*
+*/
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+/* The pktid hanger-slot field is 16 bits wide, giving an absolute max of
+ * 65536 slots; 1024 of them are used here.
+ */
+#define WLFC_HANGER_MAXITEMS 1024
+
+#define WLFC_HANGER_ITEM_STATE_FREE		1
+#define WLFC_HANGER_ITEM_STATE_INUSE	2
+
+#define WLFC_PKTID_HSLOT_MASK			0xffff /* allow 16 bits only */
+#define WLFC_PKTID_HSLOT_SHIFT			8
+
+/* x -> TXSTATUS TAG to/from firmware */
+#define WLFC_PKTID_HSLOT_GET(x)			\
+	(((x) >> WLFC_PKTID_HSLOT_SHIFT) & WLFC_PKTID_HSLOT_MASK)
+#define WLFC_PKTID_HSLOT_SET(var, slot)	\
+	((var) = ((var) & ~(WLFC_PKTID_HSLOT_MASK << WLFC_PKTID_HSLOT_SHIFT)) | \
+	(((slot) & WLFC_PKTID_HSLOT_MASK) << WLFC_PKTID_HSLOT_SHIFT))
+
+#define WLFC_PKTID_FREERUNCTR_MASK	0xff
+
+#define WLFC_PKTID_FREERUNCTR_GET(x)	((x) & WLFC_PKTID_FREERUNCTR_MASK)
+#define WLFC_PKTID_FREERUNCTR_SET(var, ctr)	\
+	((var) = (((var) & ~WLFC_PKTID_FREERUNCTR_MASK) | \
+	(((ctr) & WLFC_PKTID_FREERUNCTR_MASK))))
+
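The accessor macros above pack a 16-bit hanger slot (bits 8..23) and an 8-bit free-running counter (bits 0..7) into the TXSTATUS tag exchanged with firmware. A small sketch of how they compose (the helper is invented for illustration):

/* Illustration only, not part of the header: build a pktid from a hanger
 * slot and free-running counter, then read them back with the GET macros.
 */
static uint32 example_wlfc_make_pktid(uint16 hslot, uint8 ctr)
{
	uint32 pktid = 0;

	WLFC_PKTID_HSLOT_SET(pktid, hslot);	/* slot into bits 8..23 */
	WLFC_PKTID_FREERUNCTR_SET(pktid, ctr);	/* counter into bits 0..7 */

	/* WLFC_PKTID_HSLOT_GET(pktid) == hslot,
	 * WLFC_PKTID_FREERUNCTR_GET(pktid) == ctr
	 */
	return pktid;
}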
+#define WLFC_PKTQ_PENQ(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec)))? \
+	NULL : pktq_penq((pq), (prec), (p)))
+#define WLFC_PKTQ_PENQ_HEAD(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec))) ? \
+	NULL : pktq_penq_head((pq), (prec), (p)))
+
+typedef enum ewlfc_packet_state {
+	eWLFC_PKTTYPE_NEW,
+	eWLFC_PKTTYPE_DELAYED,
+	eWLFC_PKTTYPE_SUPPRESSED,
+	eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+	eWLFC_MAC_ENTRY_ACTION_ADD,
+	eWLFC_MAC_ENTRY_ACTION_DEL,
+	eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+	uint8	state;
+	uint8	pad[3];
+	uint32	identifier;
+	void*	pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32	push_time;
+#endif
+} wlfc_hanger_item_t;
+
+typedef struct wlfc_hanger {
+	int max_items;
+	uint32 pushed;
+	uint32 popped;
+	uint32 failed_to_push;
+	uint32 failed_to_pop;
+	uint32 failed_slotfind;
+	wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n)	((sizeof(wlfc_hanger_t) - \
+	sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
+
+#define WLFC_STATE_OPEN		1
+#define WLFC_STATE_CLOSE	2
+
+#define WLFC_PSQ_PREC_COUNT		((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */
+#define WLFC_PSQ_LEN			64
+#define WLFC_SENDQ_LEN			256
+
+#define WLFC_FLOWCONTROL_DELTA		8
+#define WLFC_FLOWCONTROL_HIWATER	(WLFC_PSQ_LEN - WLFC_FLOWCONTROL_DELTA)
+#define WLFC_FLOWCONTROL_LOWATER	(WLFC_FLOWCONTROL_HIWATER - WLFC_FLOWCONTROL_DELTA)
+
+typedef struct wlfc_mac_descriptor {
+	uint8 occupied;
+	uint8 interface_id;
+	uint8 iftype;
+	uint8 state;
+	uint8 ac_bitmap; /* for APSD */
+	uint8 requested_credit;
+	uint8 requested_packet;
+	uint8 ea[ETHER_ADDR_LEN];
+	/*
+	Maintain a per-(MAC, AC) sequence count for packets going to the
+	device, plus one entry for bc/mc traffic.
+	*/
+	uint8 seq[AC_COUNT + 1];
+	uint8 generation;
+	struct pktq	psq;
+	/* The AC pending bitmap that was reported to the fw at last change */
+	uint8 traffic_lastreported_bmp;
+	/* The new AC pending bitmap */
+	uint8 traffic_pending_bmp;
+	/* 1= send on next opportunity */
+	uint8 send_tim_signal;
+	uint8 mac_handle;
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32 dstncredit_sent_packets;
+	uint32 dstncredit_acks;
+	uint32 opened_ct;
+	uint32 closed_ct;
+#endif
+} wlfc_mac_descriptor_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+	entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
+
+typedef struct athost_wl_stat_counters {
+	uint32	pktin;
+	uint32	pkt2bus;
+	uint32	pktdropped;
+	uint32	tlv_parse_failed;
+	uint32	rollback;
+	uint32	rollback_failed;
+	uint32	sendq_full_error;
+	uint32	delayq_full_error;
+	uint32	credit_request_failed;
+	uint32	packet_request_failed;
+	uint32	mac_update_failed;
+	uint32	psmode_update_failed;
+	uint32	interface_update_failed;
+	uint32	wlfc_header_only_pkt;
+	uint32	txstatus_in;
+	uint32	d11_suppress;
+	uint32	wl_suppress;
+	uint32	bad_suppress;
+	uint32	pkt_freed;
+	uint32	pkt_free_err;
+	uint32	psq_wlsup_retx;
+	uint32	psq_wlsup_enq;
+	uint32	psq_d11sup_retx;
+	uint32	psq_d11sup_enq;
+	uint32	psq_hostq_retx;
+	uint32	psq_hostq_enq;
+	uint32	mac_handle_notfound;
+	uint32	wlc_tossed_pkts;
+	uint32	dhd_hdrpulls;
+	uint32	generic_error;
+	/* an extra one for bc/mc traffic */
+	uint32	sendq_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+	/* all pkt2bus -> txstatus latency accumulated */
+	uint32	latency_sample_count;
+	uint32	total_status_latency;
+	uint32	latency_most_recent;
+	int		idx_delta;
+	uint32	deltas[10];
+	uint32	fifo_credits_sent[6];
+	uint32	fifo_credits_back[6];
+	uint32	dropped_qfull[6];
+	uint32	signal_only_pkts_sent;
+	uint32	signal_only_pkts_freed;
+#endif
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+	(ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+
+#define WLFC_FCMODE_NONE				0
+#define WLFC_FCMODE_IMPLIED_CREDIT		1
+#define WLFC_FCMODE_EXPLICIT_CREDIT		2
+
+typedef struct athost_wl_status_info {
+	uint8	last_seqid_to_wlc;
+
+	/* OSL handle */
+	osl_t*	osh;
+	/* dhd pub */
+	void*	dhdp;
+
+	/* stats */
+	athost_wl_stat_counters_t stats;
+
+	/* the additional ones are for bc/mc and ATIM FIFO */
+	int     FIFO_credit[AC_COUNT + 2];
+	struct  pktq SENDQ;
+
+	/* packet hanger and MAC->handle lookup table */
+	void*	hanger;
+	struct {
+		/* table for individual nodes */
+		wlfc_mac_descriptor_t	nodes[WLFC_MAC_DESC_TABLE_SIZE];
+		/* table for interfaces */
+		wlfc_mac_descriptor_t	interfaces[WLFC_MAX_IFNUM];
+		/* OS may send packets to unknown (unassociated) destinations */
+		/* A place holder for bc/mc and packets to unknown destinations */
+		wlfc_mac_descriptor_t	other;
+	} destination_entries;
+	/* token position for different priority packets */
+	uint8   token_pos[AC_COUNT];
+	/* ON/OFF state for flow control to the host network interface */
+	uint8	hostif_flow_state[WLFC_MAX_IFNUM];
+	uint8	host_ifidx;
+	/* to flow control an OS interface */
+	uint8	toggle_host_if;
+
+	/*
+	Mode in which the dhd flow control shall operate. Must be set before
+	traffic starts flowing to the device.
+	0 - Do not do any proptxstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	*/
+	uint8	proptxstatus_mode;
+} athost_wl_status_info_t;
+
+#endif /* __wlfc_host_driver_definitions_h__ */
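To tie the proptxstatus_mode comment above to the WLFC_FCMODE_* values defined earlier in this header, a tiny hypothetical helper (illustration only, not part of the header):

/* Illustration only. */
static int example_wlfc_uses_credits(athost_wl_status_info_t *wlfc)
{
	/* modes 1 and 2 both account for credits; mode 0 does not */
	return (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT ||
	        wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT);
}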
diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h
new file mode 100644
index 0000000..9cdf718
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -0,0 +1,43 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h,v 1.5 2008-06-02 16:56:20 Exp $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+	unsigned long	rx_packets;		/* total packets received */
+	unsigned long	tx_packets;		/* total packets transmitted */
+	unsigned long	rx_bytes;		/* total bytes received */
+	unsigned long	tx_bytes;		/* total bytes transmitted */
+	unsigned long	rx_errors;		/* bad packets received */
+	unsigned long	tx_errors;		/* packet transmit problems */
+	unsigned long	rx_dropped;		/* packets dropped by dongle */
+	unsigned long	tx_dropped;		/* packets dropped by dongle */
+	unsigned long   multicast;      /* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
new file mode 100644
index 0000000..8b39b9e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -0,0 +1,40 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_wlhdr.h,v 1.1 2009-01-08 01:21:12 Exp $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+    uint8   type;           /* Header type */
+    uint8   version;        /* Header version */
+	int8	rssi;			/* RSSI */
+	uint8	pad;			/* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN   sizeof(wl_header_t)
+#define WL_HEADER_TYPE  0
+#define WL_HEADER_VER   1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c
new file mode 100644
index 0000000..b9586e4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -0,0 +1,222 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c,v 1.228.2.56 2011-02-11 22:49:07 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+#define	PMU_ERROR(args)
+
+#define	PMU_MSG(args)
+
+/* Used to check in verbose debugging messages that are not intended
+ * to be enabled except on private builds.
+ */
+#define	PMU_NONE(args)
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+	uint8 strength;			/* Pad Drive Strength in mA */
+	uint8 sel;			/* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+	{4, 0x2},
+	{2, 0x3},
+	{1, 0x0},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+	{12, 0x7},
+	{10, 0x6},
+	{8, 0x5},
+	{6, 0x4},
+	{4, 0x2},
+	{2, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+	{32, 0x7},
+	{26, 0x6},
+	{22, 0x5},
+	{16, 0x4},
+	{12, 0x3},
+	{8, 0x2},
+	{4, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+	{32, 0x6},
+	{26, 0x7},
+	{22, 0x4},
+	{16, 0x5},
+	{12, 0x2},
+	{8, 0x3},
+	{4, 0x0},
+	{0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v2[] = {
+	{16, 0x3},
+	{13, 0x2},
+	{11, 0x1},
+	{8, 0x0},
+	{6, 0x7},
+	{4, 0x6},
+	{2, 0x5},
+	{0, 0x4} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_2v5[] = {
+	{80, 0x5},
+	{65, 0x4},
+	{55, 0x7},
+	{40, 0x6},
+	{30, 0x1},
+	{20, 0x0},
+	{10, 0x3},
+	{0, 0x2} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+	{6, 0x7},
+	{5, 0x6},
+	{4, 0x5},
+	{3, 0x4},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_3v3[] = {
+	{12, 0x7},
+	{10, 0x6},
+	{8, 0x5},
+	{6, 0x4},
+	{4, 0x2},
+	{2, 0x1},
+	{0, 0x0} };
+
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))
+
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	chipcregs_t *cc;
+	uint origidx, intr_val = 0;
+	sdiod_drive_str_t *str_tab = NULL;
+	uint32 str_mask = 0;
+	uint32 str_shift = 0;
+
+	if (!(sih->cccaps & CC_CAP_PMU)) {
+		return;
+	}
+
+	/* Remember original core before switch to chipc */
+	cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+	switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+		str_mask = 0x30000000;
+		str_shift = 28;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+	case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+		if (sih->pmurev == 8) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+		}
+		else if (sih->pmurev == 11) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		}
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	default:
+		PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+		         bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+
+		break;
+	}
+
+	if (str_tab != NULL) {
+		uint32 cc_data_temp;
+		int i;
+
+		/* Pick the lowest available drive strength equal to or greater than
+		 * the requested strength.  A drive strength of 0 requests tri-state.
+		 */
+		for (i = 0; drivestrength < str_tab[i].strength; i++)
+			;
+
+		if (i > 0 && drivestrength > str_tab[i].strength)
+			i--;
+
+		W_REG(osh, &cc->chipcontrol_addr, 1);
+		cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+		cc_data_temp &= ~str_mask;
+		cc_data_temp |= str_tab[i].sel << str_shift;
+		W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+
+		PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+		         drivestrength, str_tab[i].strength));
+	}
+
+	/* Return to original core */
+	si_restore_core(sih, origidx, intr_val);
+}
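For a concrete reading of the table lookup in si_sdiod_drive_strength_init(), here is the same selection rule lifted into a standalone sketch over sdiod_drive_strength_tab2 (the helper function and the example values are illustrative only):

/* Worked illustration (standalone, not driver code): select the lowest
 * table entry whose strength is >= the request, clamping to the maximum
 * entry when the request exceeds it.
 */
static uint8 example_pick_drive_sel(uint32 drivestrength)
{
	/* entries are {strength in mA, select value}, in descending order */
	static const sdiod_drive_str_t tab[] = {
		{12, 0x7}, {10, 0x6}, {8, 0x5}, {6, 0x4},
		{4, 0x2}, {2, 0x1}, {0, 0x0} };
	int i;

	for (i = 0; drivestrength < tab[i].strength; i++)
		;
	if (i > 0 && drivestrength > tab[i].strength)
		i--;

	/* e.g. a request of 5 mA selects the 6 mA row (sel 0x4);
	 * 4 mA matches exactly (sel 0x2); 20 mA clamps to 12 mA (sel 0x7).
	 */
	return tab[i].sel;
}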
diff --git a/drivers/net/wireless/bcmdhd/include/Makefile b/drivers/net/wireless/bcmdhd/include/Makefile
new file mode 100644
index 0000000..c07266f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/Makefile
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# This Makefile serves the following purposes:
+#
+# 1. It generates native version information by querying the
+#    automerger-maintained database to see where src/include
+#    came from.
+# 2. For select components, as listed in compvers.sh
+#    it generates component version files
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile 241702 2011-02-19 00:41:03Z automrgr $
+#
+
+SRCBASE := ..
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch version
+epivers:
+	bash epivers.sh
+
+# Generate component version files, if any
+compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "Generating component versions, if any"; \
+		bash compvers.sh; \
+	else \
+		echo "Skipping component version generation"; \
+	fi
+
+# Clean up generated component version files
+clean_compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "bash compvers.sh clean"; \
+		bash compvers.sh clean; \
+	else \
+		echo "Skipping component version clean"; \
+	fi
+
+clean:
+	rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 0000000..375df44
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,377 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h,v 13.4.14.1 2010-03-09 18:40:06 Exp $
+ */
+
+
+#ifndef	_AIDMP_H
+#define	_AIDMP_H
+
+
+#define	MFGID_ARM		0x43b
+#define	MFGID_BRCM		0x4bf
+#define	MFGID_MIPS		0x4a7
+
+
+#define	CC_SIM			0
+#define	CC_EROM			1
+#define	CC_CORESIGHT		9
+#define	CC_VERIF		0xb
+#define	CC_OPTIMO		0xd
+#define	CC_GEN			0xe
+#define	CC_PRIMECELL		0xf
+
+
+#define	ER_EROMENTRY		0x000
+#define	ER_REMAPCONTROL		0xe00
+#define	ER_REMAPSELECT		0xe04
+#define	ER_MASTERSELECT		0xe10
+#define	ER_ITCR			0xf00
+#define	ER_ITIP			0xf04
+
+
+#define	ER_TAG			0xe
+#define	ER_TAG1			0x6
+#define	ER_VALID		1
+#define	ER_CI			0
+#define	ER_MP			2
+#define	ER_ADD			4
+#define	ER_END			0xe
+#define	ER_BAD			0xffffffff
+
+
+#define	CIA_MFG_MASK		0xfff00000
+#define	CIA_MFG_SHIFT		20
+#define	CIA_CID_MASK		0x000fff00
+#define	CIA_CID_SHIFT		8
+#define	CIA_CCL_MASK		0x000000f0
+#define	CIA_CCL_SHIFT		4
+
+
+#define	CIB_REV_MASK		0xff000000
+#define	CIB_REV_SHIFT		24
+#define	CIB_NSW_MASK		0x00f80000
+#define	CIB_NSW_SHIFT		19
+#define	CIB_NMW_MASK		0x0007c000
+#define	CIB_NMW_SHIFT		14
+#define	CIB_NSP_MASK		0x00003e00
+#define	CIB_NSP_SHIFT		9
+#define	CIB_NMP_MASK		0x000001f0
+#define	CIB_NMP_SHIFT		4
+
+
+#define	MPD_MUI_MASK		0x0000ff00
+#define	MPD_MUI_SHIFT		8
+#define	MPD_MP_MASK		0x000000f0
+#define	MPD_MP_SHIFT		4
+
+
+#define	AD_ADDR_MASK		0xfffff000
+#define	AD_SP_MASK		0x00000f00
+#define	AD_SP_SHIFT		8
+#define	AD_ST_MASK		0x000000c0
+#define	AD_ST_SHIFT		6
+#define	AD_ST_SLAVE		0x00000000
+#define	AD_ST_BRIDGE		0x00000040
+#define	AD_ST_SWRAP		0x00000080
+#define	AD_ST_MWRAP		0x000000c0
+#define	AD_SZ_MASK		0x00000030
+#define	AD_SZ_SHIFT		4
+#define	AD_SZ_4K		0x00000000
+#define	AD_SZ_8K		0x00000010
+#define	AD_SZ_16K		0x00000020
+#define	AD_SZ_SZD		0x00000030
+#define	AD_AG32			0x00000008
+#define	AD_ADDR_ALIGN		0x00000fff
+#define	AD_SZ_BASE		0x00001000	
+
+
+#define	SD_SZ_MASK		0xfffff000
+#define	SD_SG32			0x00000008
+#define	SD_SZ_ALIGN		0x00000fff
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _aidmp {
+	uint32	oobselina30;	
+	uint32	oobselina74;	
+	uint32	PAD[6];
+	uint32	oobselinb30;	
+	uint32	oobselinb74;	
+	uint32	PAD[6];
+	uint32	oobselinc30;	
+	uint32	oobselinc74;	
+	uint32	PAD[6];
+	uint32	oobselind30;	
+	uint32	oobselind74;	
+	uint32	PAD[38];
+	uint32	oobselouta30;	
+	uint32	oobselouta74;	
+	uint32	PAD[6];
+	uint32	oobseloutb30;	
+	uint32	oobseloutb74;	
+	uint32	PAD[6];
+	uint32	oobseloutc30;	
+	uint32	oobseloutc74;	
+	uint32	PAD[6];
+	uint32	oobseloutd30;	
+	uint32	oobseloutd74;	
+	uint32	PAD[38];
+	uint32	oobsynca;	
+	uint32	oobseloutaen;	
+	uint32	PAD[6];
+	uint32	oobsyncb;	
+	uint32	oobseloutben;	
+	uint32	PAD[6];
+	uint32	oobsyncc;	
+	uint32	oobseloutcen;	
+	uint32	PAD[6];
+	uint32	oobsyncd;	
+	uint32	oobseloutden;	
+	uint32	PAD[38];
+	uint32	oobaextwidth;	
+	uint32	oobainwidth;	
+	uint32	oobaoutwidth;	
+	uint32	PAD[5];
+	uint32	oobbextwidth;	
+	uint32	oobbinwidth;	
+	uint32	oobboutwidth;	
+	uint32	PAD[5];
+	uint32	oobcextwidth;	
+	uint32	oobcinwidth;	
+	uint32	oobcoutwidth;	
+	uint32	PAD[5];
+	uint32	oobdextwidth;	
+	uint32	oobdinwidth;	
+	uint32	oobdoutwidth;	
+	uint32	PAD[37];
+	uint32	ioctrlset;	
+	uint32	ioctrlclear;	
+	uint32	ioctrl;		
+	uint32	PAD[61];
+	uint32	iostatus;	
+	uint32	PAD[127];
+	uint32	ioctrlwidth;	
+	uint32	iostatuswidth;	
+	uint32	PAD[62];
+	uint32	resetctrl;	
+	uint32	resetstatus;	
+	uint32	resetreadid;	
+	uint32	resetwriteid;	
+	uint32	PAD[60];
+	uint32	errlogctrl;	
+	uint32	errlogdone;	
+	uint32	errlogstatus;	
+	uint32	errlogaddrlo;	
+	uint32	errlogaddrhi;	
+	uint32	errlogid;	
+	uint32	errloguser;	
+	uint32	errlogflags;	
+	uint32	PAD[56];
+	uint32	intstatus;	
+	uint32	PAD[127];
+	uint32	config;		
+	uint32	PAD[63];
+	uint32	itcr;		
+	uint32	PAD[3];
+	uint32	itipooba;	
+	uint32	itipoobb;	
+	uint32	itipoobc;	
+	uint32	itipoobd;	
+	uint32	PAD[4];
+	uint32	itipoobaout;	
+	uint32	itipoobbout;	
+	uint32	itipoobcout;	
+	uint32	itipoobdout;	
+	uint32	PAD[4];
+	uint32	itopooba;	
+	uint32	itopoobb;	
+	uint32	itopoobc;	
+	uint32	itopoobd;	
+	uint32	PAD[4];
+	uint32	itopoobain;	
+	uint32	itopoobbin;	
+	uint32	itopoobcin;	
+	uint32	itopoobdin;	
+	uint32	PAD[4];
+	uint32	itopreset;	
+	uint32	PAD[15];
+	uint32	peripherialid4;	
+	uint32	peripherialid5;	
+	uint32	peripherialid6;	
+	uint32	peripherialid7;	
+	uint32	peripherialid0;	
+	uint32	peripherialid1;	
+	uint32	peripherialid2;	
+	uint32	peripherialid3;	
+	uint32	componentid0;	
+	uint32	componentid1;	
+	uint32	componentid2;	
+	uint32	componentid3;	
+} aidmp_t;
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+
+#define	OOB_BUSCONFIG		0x020
+#define	OOB_STATUSA		0x100
+#define	OOB_STATUSB		0x104
+#define	OOB_STATUSC		0x108
+#define	OOB_STATUSD		0x10c
+#define	OOB_ENABLEA0		0x200
+#define	OOB_ENABLEA1		0x204
+#define	OOB_ENABLEA2		0x208
+#define	OOB_ENABLEA3		0x20c
+#define	OOB_ENABLEB0		0x280
+#define	OOB_ENABLEB1		0x284
+#define	OOB_ENABLEB2		0x288
+#define	OOB_ENABLEB3		0x28c
+#define	OOB_ENABLEC0		0x300
+#define	OOB_ENABLEC1		0x304
+#define	OOB_ENABLEC2		0x308
+#define	OOB_ENABLEC3		0x30c
+#define	OOB_ENABLED0		0x380
+#define	OOB_ENABLED1		0x384
+#define	OOB_ENABLED2		0x388
+#define	OOB_ENABLED3		0x38c
+#define	OOB_ITCR		0xf00
+#define	OOB_ITIPOOBA		0xf10
+#define	OOB_ITIPOOBB		0xf14
+#define	OOB_ITIPOOBC		0xf18
+#define	OOB_ITIPOOBD		0xf1c
+#define	OOB_ITOPOOBA		0xf30
+#define	OOB_ITOPOOBB		0xf34
+#define	OOB_ITOPOOBC		0xf38
+#define	OOB_ITOPOOBD		0xf3c
+
+
+#define	AI_OOBSELINA30		0x000
+#define	AI_OOBSELINA74		0x004
+#define	AI_OOBSELINB30		0x020
+#define	AI_OOBSELINB74		0x024
+#define	AI_OOBSELINC30		0x040
+#define	AI_OOBSELINC74		0x044
+#define	AI_OOBSELIND30		0x060
+#define	AI_OOBSELIND74		0x064
+#define	AI_OOBSELOUTA30		0x100
+#define	AI_OOBSELOUTA74		0x104
+#define	AI_OOBSELOUTB30		0x120
+#define	AI_OOBSELOUTB74		0x124
+#define	AI_OOBSELOUTC30		0x140
+#define	AI_OOBSELOUTC74		0x144
+#define	AI_OOBSELOUTD30		0x160
+#define	AI_OOBSELOUTD74		0x164
+#define	AI_OOBSYNCA		0x200
+#define	AI_OOBSELOUTAEN		0x204
+#define	AI_OOBSYNCB		0x220
+#define	AI_OOBSELOUTBEN		0x224
+#define	AI_OOBSYNCC		0x240
+#define	AI_OOBSELOUTCEN		0x244
+#define	AI_OOBSYNCD		0x260
+#define	AI_OOBSELOUTDEN		0x264
+#define	AI_OOBAEXTWIDTH		0x300
+#define	AI_OOBAINWIDTH		0x304
+#define	AI_OOBAOUTWIDTH		0x308
+#define	AI_OOBBEXTWIDTH		0x320
+#define	AI_OOBBINWIDTH		0x324
+#define	AI_OOBBOUTWIDTH		0x328
+#define	AI_OOBCEXTWIDTH		0x340
+#define	AI_OOBCINWIDTH		0x344
+#define	AI_OOBCOUTWIDTH		0x348
+#define	AI_OOBDEXTWIDTH		0x360
+#define	AI_OOBDINWIDTH		0x364
+#define	AI_OOBDOUTWIDTH		0x368
+
+
+#define	AI_IOCTRLSET		0x400
+#define	AI_IOCTRLCLEAR		0x404
+#define	AI_IOCTRL		0x408
+#define	AI_IOSTATUS		0x500
+#define	AI_RESETCTRL		0x800
+#define	AI_RESETSTATUS		0x804
+
+
+#define	AI_IOCTRLWIDTH		0x700
+#define	AI_IOSTATUSWIDTH	0x704
+
+#define	AI_RESETREADID		0x808
+#define	AI_RESETWRITEID		0x80c
+#define	AI_ERRLOGCTRL		0xa00
+#define	AI_ERRLOGDONE		0xa04
+#define	AI_ERRLOGSTATUS		0xa08
+#define	AI_ERRLOGADDRLO		0xa0c
+#define	AI_ERRLOGADDRHI		0xa10
+#define	AI_ERRLOGID		0xa14
+#define	AI_ERRLOGUSER		0xa18
+#define	AI_ERRLOGFLAGS		0xa1c
+#define	AI_INTSTATUS		0xa00
+#define	AI_CONFIG		0xe00
+#define	AI_ITCR			0xf00
+#define	AI_ITIPOOBA		0xf10
+#define	AI_ITIPOOBB		0xf14
+#define	AI_ITIPOOBC		0xf18
+#define	AI_ITIPOOBD		0xf1c
+#define	AI_ITIPOOBAOUT		0xf30
+#define	AI_ITIPOOBBOUT		0xf34
+#define	AI_ITIPOOBCOUT		0xf38
+#define	AI_ITIPOOBDOUT		0xf3c
+#define	AI_ITOPOOBA		0xf50
+#define	AI_ITOPOOBB		0xf54
+#define	AI_ITOPOOBC		0xf58
+#define	AI_ITOPOOBD		0xf5c
+#define	AI_ITOPOOBAIN		0xf70
+#define	AI_ITOPOOBBIN		0xf74
+#define	AI_ITOPOOBCIN		0xf78
+#define	AI_ITOPOOBDIN		0xf7c
+#define	AI_ITOPRESET		0xf90
+#define	AI_PERIPHERIALID4	0xfd0
+#define	AI_PERIPHERIALID5	0xfd4
+#define	AI_PERIPHERIALID6	0xfd8
+#define	AI_PERIPHERIALID7	0xfdc
+#define	AI_PERIPHERIALID0	0xfe0
+#define	AI_PERIPHERIALID1	0xfe4
+#define	AI_PERIPHERIALID2	0xfe8
+#define	AI_PERIPHERIALID3	0xfec
+#define	AI_COMPONENTID0		0xff0
+#define	AI_COMPONENTID1		0xff4
+#define	AI_COMPONENTID2		0xff8
+#define	AI_COMPONENTID3		0xffc
+
+
+#define	AIRC_RESET		1
+
+
+#define	AICFG_OOB		0x00000020
+#define	AICFG_IOS		0x00000010
+#define	AICFG_IOC		0x00000008
+#define	AICFG_TO		0x00000004
+#define	AICFG_ERRL		0x00000002
+#define	AICFG_RST		0x00000001
+
+
+#define OOB_SEL_OUTEN_B_5	15
+#define OOB_SEL_OUTEN_B_6	23
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 0000000..ce45c50
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,121 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h,v 13.25.10.3 2010-12-22 23:47:26 Exp $
+ */
+
+#ifndef _bcmcdc_h_
+#define	_bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+	uint32 cmd;	/* ioctl command value */
+	uint32 len;	/* lower 16 bits: output buflen; upper 16 bits: input buflen (see CDCL_IOC_*) */
+	uint32 flags;	/* request id, interface index and flag bits (see CDCF_IOC_*) */
+	uint32 status;	/* status code returned from the device */
+} cdc_ioctl_t;
+
+
+#define CDC_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+
+#define CDCL_IOC_OUTLEN_MASK   0x0000FFFF  
+					   
+#define CDCL_IOC_OUTLEN_SHIFT  0
+#define CDCL_IOC_INLEN_MASK    0xFFFF0000   
+#define CDCL_IOC_INLEN_SHIFT   16
+
+
+#define CDCF_IOC_ERROR		0x01	
+#define CDCF_IOC_SET		0x02	
+#define CDCF_IOC_OVL_IDX_MASK	0x3c	
+#define CDCF_IOC_OVL_RSV	0x40	
+#define CDCF_IOC_OVL		0x80	
+#define CDCF_IOC_ACTION_MASK	0xfe	
+#define CDCF_IOC_ACTION_SHIFT	1	
+#define CDCF_IOC_IF_MASK	0xF000	
+#define CDCF_IOC_IF_SHIFT	12
+#define CDCF_IOC_ID_MASK	0xFFFF0000	
+#define CDCF_IOC_ID_SHIFT	16		
+
+#define CDC_IOC_IF_IDX(flags)	(((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags)	(((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
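+/*
+ * Illustrative sketch: composing the flags word for a SET request and
+ * matching the id on the reply.  'msg', 'reqid' and 'err' are hypothetical
+ * locals; only the macros defined above are used.
+ *
+ *	cdc_ioctl_t msg;
+ *	uint16 reqid = 1;
+ *	int err = 0;
+ *
+ *	msg.flags = ((uint32)reqid << CDCF_IOC_ID_SHIFT) | CDCF_IOC_SET;
+ *	CDC_SET_IF_IDX(&msg, 0);
+ *	(send msg, then read the reply back into msg)
+ *	if ((msg.flags & CDCF_IOC_ERROR) || CDC_IOC_ID(msg.flags) != reqid)
+ *		err = (int)msg.status;
+ */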
+
+
+#define	BDC_HEADER_LEN		4
+
+#define BDC_PROTO_VER_1		1	
+#define BDC_PROTO_VER		2	
+
+#define BDC_FLAG_VER_MASK	0xf0	
+#define BDC_FLAG_VER_SHIFT	4	
+
+#define BDC_FLAG__UNUSED	0x03	
+#define BDC_FLAG_SUM_GOOD	0x04	
+#define BDC_FLAG_SUM_NEEDED	0x08	
+
+#define BDC_PRIORITY_MASK	0x7
+
+#define BDC_FLAG2_FC_FLAG	0x10	
+									
+#define BDC_PRIORITY_FC_SHIFT	4		
+
+#define BDC_FLAG2_IF_MASK	0x0f	
+#define BDC_FLAG2_IF_SHIFT	0
+#define BDC_FLAG2_PAD_MASK		0xf0
+#define BDC_FLAG_PAD_MASK		0x03
+#define BDC_FLAG2_PAD_SHIFT		2
+#define BDC_FLAG_PAD_SHIFT		0
+#define BDC_FLAG2_PAD_IDX		0x3c
+#define BDC_FLAG_PAD_IDX		0x03
+#define BDC_GET_PAD_LEN(hdr) \
+	((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+	((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+	do { \
+		(hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+			(((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT)); \
+		(hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+			(((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)); \
+	} while (0)
+
+#define BDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+struct bdc_header {
+	uint8	flags;		/* protocol version in high nibble; checksum/pad bits in low nibble */
+	uint8	priority;	/* 802.1d priority in low 3 bits (BDC_PRIORITY_MASK) */
+	uint8	flags2;		/* interface index in low nibble; pad bits in high nibble */
+	uint8	dataOffset;
+};
+
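+/*
+ * Illustrative sketch: tagging an outgoing frame with a BDC header using the
+ * fields and macros above.  tag_tx_frame(), 'prio' and 'ifidx' are
+ * hypothetical names, not part of this API.
+ *
+ *	static void tag_tx_frame(struct bdc_header *h, uint8 prio, int ifidx)
+ *	{
+ *		h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ *		h->priority = prio & BDC_PRIORITY_MASK;
+ *		h->flags2 = 0;
+ *		h->dataOffset = 0;
+ *		BDC_SET_IF_IDX(h, ifidx);
+ *	}
+ */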
+#endif	/* _bcmcdc_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
new file mode 100644
index 0000000..da1fd5e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -0,0 +1,196 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdefs.h,v 13.68.2.8 2011-01-08 04:04:19 Exp $
+ */
+
+
+#ifndef	_bcmdefs_h_
+#define	_bcmdefs_h_
+
+
+
+#define bcmreclaimed 		0
+#define _data	_data
+#define _fn	_fn
+#define _data	_data
+#define _fn		_fn
+#define _fn	_fn
+#define CONST	const
+#define BCMFASTPATH
+
+
+
+
+#define _data	_data
+#define _fn		_fn
+#define _fn	_fn
+#define STATIC	static
+
+
+#define	SI_BUS			0	
+#define	PCI_BUS			1	
+#define	PCMCIA_BUS		2	
+#define SDIO_BUS		3	
+#define JTAG_BUS		4	
+#define USB_BUS			5	
+#define SPI_BUS			6	
+#define RPC_BUS			7	
+
+
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) 	(BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) 	(bus)
+#endif
+
+
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) 	(BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) 	(bus)
+#endif
+
+
+
+#if defined(BCMSPROMBUS)
+#define SPROMBUS	(BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS	(PCMCIA_BUS)
+#else
+#define SPROMBUS	(PCI_BUS)
+#endif
+
+
+#ifdef BCMCHIPID
+#define CHIPID(chip)	(BCMCHIPID)
+#else
+#define CHIPID(chip)	(chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev)	(BCMCHIPREV)
+#else
+#define CHIPREV(rev)	(rev)
+#endif
+
+
+#define DMADDR_MASK_32 0x0		
+#define DMADDR_MASK_30 0xc0000000	
+#define DMADDR_MASK_0  0xffffffff	
+
+#define	DMADDRWIDTH_30  30 
+#define	DMADDRWIDTH_32  32 
+#define	DMADDRWIDTH_63  63 
+#define	DMADDRWIDTH_64  64 
+
+#ifdef BCMDMA64OSL
+typedef struct {
+	uint32 loaddr;
+	uint32 hiaddr;
+} dma64addr_t;
+
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) ((_pa).hiaddr)
+#define PHYSADDRHISET(_pa, _val) \
+	do { \
+		(_pa).hiaddr = (_val);		\
+	} while (0)
+#define PHYSADDRLO(_pa) ((_pa).loaddr)
+#define PHYSADDRLOSET(_pa, _val) \
+	do { \
+		(_pa).loaddr = (_val);		\
+	} while (0)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+	do { \
+		(_pa) = (_val);			\
+	} while (0)
+#endif	/* BCMDMA64OSL */
+
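+/*
+ * Illustrative sketch: the PHYSADDR*SET macros above let callers fill a
+ * dmaaddr_t without caring which variant was compiled in ('pa' and
+ * 'physaddr64' are hypothetical locals).
+ *
+ *	dmaaddr_t pa;
+ *	PHYSADDRLOSET(pa, (uint32)(physaddr64 & 0xffffffff));
+ *	PHYSADDRHISET(pa, (uint32)(physaddr64 >> 32));
+ */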
+
+typedef struct  {
+	dmaaddr_t addr;
+	uint32	  length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 4
+
+
+typedef struct {
+	void *oshdmah; 
+	uint origsize; 
+	uint nsegs;
+	hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+
+#define BCMEXTRAHDROOM 220
+#else
+#define BCMEXTRAHDROOM 172
+#endif
+
+
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD	(BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif	/* BCMASSERT_LOG */
+
+
+#define BITFIELD_MASK(width) \
+		(((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+		(((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+		(((val) & (~(field ## _M << field ## _S))) | \
+		 ((unsigned)(bits) << field ## _S))
+
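+/*
+ * Illustrative sketch: GFIELD/SFIELD expect companion <FIELD>_S (shift) and
+ * <FIELD>_M (right-justified mask) constants.  EXAMPLE_* is a hypothetical
+ * field occupying bits 4..7 of a register value.
+ *
+ *	#define EXAMPLE_S	4
+ *	#define EXAMPLE_M	BITFIELD_MASK(4)
+ *
+ *	uint32 regval = SFIELD(0, EXAMPLE, 0x5);	regval == 0x50
+ *	uint32 field  = GFIELD(regval, EXAMPLE);	field == 0x5
+ */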
+
+#ifdef BCMSMALL
+#undef	BCMSPACE
+#define bcmspace	FALSE	
+#else
+#define	BCMSPACE
+#define bcmspace	TRUE	
+#endif
+
+
+#define	MAXSZ_NVRAM_VARS	4096
+
+#define LOCATOR_EXTERN static
+
+#endif	/* _bcmdefs_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 0000000..4f707c0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,182 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h,v 13.285.2.39 2011-02-04 05:03:16 Exp $
+ */
+
+
+#ifndef	_BCMDEVS_H
+#define	_BCMDEVS_H
+
+
+#define	VENDOR_EPIGRAM		0xfeda
+#define	VENDOR_BROADCOM		0x14e4
+#define VENDOR_SI_IMAGE		0x1095		
+#define VENDOR_TI		0x104c		
+#define VENDOR_RICOH		0x1180		
+#define VENDOR_JMICRON		0x197b
+
+
+
+#define	VENDOR_BROADCOM_PCMCIA	0x02d0
+
+
+#define	VENDOR_BROADCOM_SDIO	0x00BF
+
+
+#define BCM_DNGL_VID		0x0a5c
+#define BCM_DNGL_BL_PID_4328	0xbd12
+#define BCM_DNGL_BL_PID_4322	0xbd13
+#define BCM_DNGL_BL_PID_4319    0xbd16
+#define BCM_DNGL_BL_PID_43236   0xbd17
+#define BCM_DNGL_BL_PID_4332	0xbd18
+#define BCM_DNGL_BL_PID_4330	0xbd19
+#define BCM_DNGL_BL_PID_43239   0xbd1b
+#define BCM_DNGL_BDC_PID	0x0bdc
+#define BCM_DNGL_JTAG_PID	0x4a44
+#define	BCM4325_D11DUAL_ID	0x431b		
+#define	BCM4325_D11G_ID		0x431c		
+#define	BCM4325_D11A_ID		0x431d		
+#define	BCM4321_D11N_ID		0x4328		
+#define	BCM4321_D11N2G_ID	0x4329		
+#define	BCM4321_D11N5G_ID	0x432a		
+#define BCM4322_D11N_ID		0x432b		
+#define BCM4322_D11N2G_ID	0x432c		
+#define BCM4322_D11N5G_ID	0x432d		
+#define BCM4329_D11N_ID		0x432e		
+#define BCM4329_D11N2G_ID	0x432f		
+#define BCM4329_D11N5G_ID	0x4330		
+#define	BCM4315_D11DUAL_ID	0x4334		
+#define	BCM4315_D11G_ID		0x4335		
+#define	BCM4315_D11A_ID		0x4336		
+#define BCM4319_D11N_ID		0x4337		
+#define BCM4319_D11N2G_ID	0x4338		
+#define BCM4319_D11N5G_ID	0x4339		
+#define BCM43231_D11N2G_ID	0x4340		
+#define BCM43221_D11N2G_ID	0x4341		
+#define BCM43222_D11N_ID	0x4350		
+#define BCM43222_D11N2G_ID	0x4351		
+#define BCM43222_D11N5G_ID	0x4352		
+#define BCM43224_D11N_ID	0x4353		
+#define BCM43224_D11N_ID_VEN1	0x0576		
+#define BCM43226_D11N_ID	0x4354		
+#define BCM43236_D11N_ID	0x4346		
+#define BCM43236_D11N2G_ID	0x4347		
+#define BCM43236_D11N5G_ID	0x4348		
+#define BCM43225_D11N2G_ID	0x4357		
+#define BCM43421_D11N_ID	0xA99D		
+#define BCM4313_D11N2G_ID	0x4727		
+#define BCM4330_D11N_ID         0x4360          
+#define BCM4330_D11N2G_ID       0x4361          
+#define BCM4330_D11N5G_ID       0x4362          
+#define BCM4336_D11N_ID		0x4343		
+#define BCM6362_D11N_ID		0x435f		
+#define BCM4331_D11N_ID		0x4331		
+#define BCM4331_D11N2G_ID	0x4332		
+#define BCM4331_D11N5G_ID	0x4333		
+#define BCM43237_D11N_ID	0x4355		
+#define BCM43237_D11N5G_ID	0x4356		
+#define BCM43227_D11N2G_ID	0x4358		
+#define BCM43228_D11N_ID		0x4359		
+#define BCM43228_D11N5G_ID	0x435a		 
+#define BCM43362_D11N_ID	0x4363		
+#define BCM43239_D11N_ID	0x4370		
+
+
+#define SDIOH_FPGA_ID		0x43f2		
+#define SPIH_FPGA_ID		0x43f5		
+#define	BCM4710_DEVICE_ID	0x4710		
+#define BCM27XX_SDIOH_ID	0x2702		
+#define PCIXX21_FLASHMEDIA0_ID	0x8033		
+#define PCIXX21_SDIOH0_ID	0x8034		
+#define PCIXX21_FLASHMEDIA_ID	0x803b		
+#define PCIXX21_SDIOH_ID	0x803c		
+#define R5C822_SDIOH_ID		0x0822		
+#define JMICRON_SDIOH_ID	0x2381		
+
+
+#define	BCM4306_CHIP_ID		0x4306		
+#define	BCM4311_CHIP_ID		0x4311		
+#define	BCM43111_CHIP_ID	43111		
+#define	BCM43112_CHIP_ID	43112		
+#define	BCM4312_CHIP_ID		0x4312		
+#define BCM4313_CHIP_ID		0x4313		
+#define	BCM4315_CHIP_ID		0x4315		
+#define	BCM4318_CHIP_ID		0x4318		
+#define	BCM4319_CHIP_ID		0x4319		
+#define	BCM4320_CHIP_ID		0x4320		
+#define	BCM4321_CHIP_ID		0x4321		
+#define	BCM4322_CHIP_ID		0x4322		
+#define	BCM43221_CHIP_ID	43221		
+#define	BCM43222_CHIP_ID	43222		
+#define	BCM43224_CHIP_ID	43224		
+#define	BCM43225_CHIP_ID	43225		
+#define	BCM43227_CHIP_ID	43227		
+#define	BCM43228_CHIP_ID	43228		
+#define	BCM43226_CHIP_ID	43226		
+#define	BCM43231_CHIP_ID	43231		
+#define	BCM43234_CHIP_ID	43234		
+#define	BCM43235_CHIP_ID	43235		
+#define	BCM43236_CHIP_ID	43236		
+#define	BCM43237_CHIP_ID	43237		
+#define	BCM43238_CHIP_ID	43238		
+#define	BCM43239_CHIP_ID	43239		
+#define	BCM43420_CHIP_ID	43420		
+#define	BCM43421_CHIP_ID	43421		
+#define	BCM43428_CHIP_ID	43428		
+#define	BCM43431_CHIP_ID	43431		
+#define	BCM4325_CHIP_ID		0x4325		
+#define	BCM4328_CHIP_ID		0x4328		
+#define	BCM4329_CHIP_ID		0x4329		
+#define	BCM4331_CHIP_ID		0x4331		
+#define BCM4336_CHIP_ID		0x4336		
+#define BCM43362_CHIP_ID	43362		
+#define BCM4330_CHIP_ID		0x4330		
+#define	BCM4402_CHIP_ID		0x4402		
+#define	BCM4704_CHIP_ID		0x4704		
+#define	BCM4710_CHIP_ID		0x4710		
+#define	BCM4712_CHIP_ID		0x4712		
+#define BCM4785_CHIP_ID		0x4785		
+#define	BCM5350_CHIP_ID		0x5350		
+#define	BCM5352_CHIP_ID		0x5352		
+#define	BCM5354_CHIP_ID		0x5354		
+#define BCM5365_CHIP_ID		0x5365		
+
+
+#define	BCM4303_PKG_ID		2		
+#define	BCM4309_PKG_ID		1		
+#define	BCM4712LARGE_PKG_ID	0		
+#define	BCM4712SMALL_PKG_ID	1		
+#define	BCM4712MID_PKG_ID	2		
+#define BCM4328USBD11G_PKG_ID	2		
+#define BCM4328USBDUAL_PKG_ID	3		
+#define BCM4328SDIOD11G_PKG_ID	4		
+#define BCM4328SDIODUAL_PKG_ID	5		
+#define BCM4329_289PIN_PKG_ID	0		
+#define BCM4329_182PIN_PKG_ID	1		
+#define BCM5354E_PKG_ID		1		
+#define HDLSIM5350_PKG_ID	1		
+#define HDLSIM_PKG_ID		14		
+#define HWSIM_PKG_ID		15		
+
+#endif	/* _BCMDEVS_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h
new file mode 100644
index 0000000..04b07ec
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -0,0 +1,279 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *  $Id: bcmendian.h,v 1.36 2009-11-09 05:29:43 Exp $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+
+#define BCMSWAP16(val) \
+	((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+		  (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+
+#define BCMSWAP32(val) \
+	((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+		  (((uint32)(val) & (uint32)0x0000ff00U) <<  8) | \
+		  (((uint32)(val) & (uint32)0x00ff0000U) >>  8) | \
+		  (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+
+#define BCMSWAP32BY16(val) \
+	((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+		  (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define	hton16(i) bcmswap16(i)
+#define	HTON32(i) BCMSWAP32(i)
+#define	hton32(i) bcmswap32(i)
+#define	NTOH16(i) BCMSWAP16(i)
+#define	ntoh16(i) bcmswap16(i)
+#define	NTOH32(i) BCMSWAP32(i)
+#define	ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#endif	/* hton16 */
+
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+
+
+#define load32_ua(a)		ltoh32_ua(a)
+#define store32_ua(a, v)	htol32_ua_store(v, a)
+#define load16_ua(a)		ltoh16_ua(a)
+#define store16_ua(a, v)	htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp)	((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp)	((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp)	(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp)	(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+#define ltoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
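+/*
+ * Illustrative sketch: storing and re-reading a little-endian 16-bit value at
+ * an unaligned offset in a byte buffer ('buf' is a hypothetical local).  The
+ * result is the same on little- and big-endian hosts.
+ *
+ *	uint8 buf[8] = { 0 };
+ *	htol16_ua_store(0x1234, &buf[1]);
+ *	uint16 len = ltoh16_ua(&buf[1]);	len == 0x1234
+ */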
+#ifdef __GNUC__
+
+
+
+#define bcmswap16(val) ({ \
+	uint16 _val = (val); \
+	BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+	uint16 *_buf = (uint16 *)(buf); \
+	uint _wds = (len) / 2; \
+	while (_wds--) { \
+		*_buf = bcmswap16(*_buf); \
+		_buf++; \
+	} \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = (_val >> 8) & 0xff; \
+	_bytes[2] = (_val >> 16) & 0xff; \
+	_bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 8; \
+	_bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 24; \
+	_bytes[1] = (_val >> 16) & 0xff; \
+	_bytes[2] = (_val >> 8) & 0xff; \
+	_bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH32_UA(_bytes); \
+})
+
+#else	/* !__GNUC__ */
+
+
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+	return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+	return BCMSWAP32(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+	return BCMSWAP32BY16(val);
+}
+
+
+
+
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+	len = len / 2;
+
+	while (len--) {
+		*buf = bcmswap16(*buf);
+		buf++;
+	}
+}
+
+
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = val >> 8;
+}
+
+
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = (val >> 8) & 0xff;
+	bytes[2] = (val >> 16) & 0xff;
+	bytes[3] = val >> 24;
+}
+
+
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val >> 8;
+	bytes[1] = val & 0xff;
+}
+
+
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val >> 24;
+	bytes[1] = (val >> 16) & 0xff;
+	bytes[2] = (val >> 8) & 0xff;
+	bytes[3] = val & 0xff;
+}
+
+
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+	return _LTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+	return _LTOH32_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+	return _NTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+	return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#endif	/* __GNUC__ */
+#endif	/* _BCMENDIAN_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
new file mode 100644
index 0000000..fd148c5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -0,0 +1,181 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.h,v 13.15.112.1 2010-11-15 18:22:12 Exp $
+ */
+#ifndef	_BCM_PCI_SPI_H
+#define	_BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	uint32 spih_ctrl;		/* 0x00 SPI Control Register */
+	uint32 spih_stat;		/* 0x04 SPI Status Register */
+	uint32 spih_data;		/* 0x08 SPI Data Register, 32-bits wide */
+	uint32 spih_ext;		/* 0x0C SPI Extension Register */
+	uint32 PAD[4];			/* 0x10-0x1F PADDING */
+
+	uint32 spih_gpio_ctrl;		/* 0x20 SPI GPIO Control Register */
+	uint32 spih_gpio_data;		/* 0x24 SPI GPIO Data Register */
+	uint32 PAD[6];			/* 0x28-0x3F PADDING */
+
+	uint32 spih_int_edge;		/* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+	uint32 spih_int_pol;		/* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+							/* 1=Active High) */
+	uint32 spih_int_mask;		/* 0x48 SPI Interrupt Mask */
+	uint32 spih_int_status;		/* 0x4C SPI Interrupt Status */
+	uint32 PAD[4];			/* 0x50-0x5F PADDING */
+
+	uint32 spih_hex_disp;		/* 0x60 SPI 4-digit hex display value */
+	uint32 spih_current_ma;		/* 0x64 SPI SD card current consumption in mA */
+	uint32 PAD[1];			/* 0x68 PADDING */
+	uint32 spih_disp_sel;		/* 0x6c SPI 4-digit hex display mode select (1=current) */
+	uint32 PAD[4];			/* 0x70-0x7F PADDING */
+	uint32 PAD[8];			/* 0x80-0x9F PADDING */
+	uint32 PAD[8];			/* 0xA0-0xBF PADDING */
+	uint32 spih_pll_ctrl;	/* 0xC0 PLL Control Register */
+	uint32 spih_pll_status;	/* 0xC4 PLL Status Register */
+	uint32 spih_xtal_freq;	/* 0xC8 External Clock Frequency in units of 10000Hz */
+	uint32 spih_clk_count;	/* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+	uint32 cfg_space[0x40];		/* 0x000-0x0FF PCI Configuration Space (Read Only) */
+	uint32 P_IMG_CTRL0;		/* 0x100 PCI Image0 Control Register */
+
+	uint32 P_BA0;			/* 0x104 32 R/W PCI Image0 Base Address register */
+	uint32 P_AM0;			/* 0x108 32 R/W PCI Image0 Address Mask register */
+	uint32 P_TA0;			/* 0x10C 32 R/W PCI Image0 Translation Address register */
+	uint32 P_IMG_CTRL1;		/* 0x110 32 R/W PCI Image1 Control register */
+	uint32 P_BA1;			/* 0x114 32 R/W PCI Image1 Base Address register */
+	uint32 P_AM1;			/* 0x118 32 R/W PCI Image1 Address Mask register */
+	uint32 P_TA1;			/* 0x11C 32 R/W PCI Image1 Translation Address register */
+	uint32 P_IMG_CTRL2;		/* 0x120 32 R/W PCI Image2 Control register */
+	uint32 P_BA2;			/* 0x124 32 R/W PCI Image2 Base Address register */
+	uint32 P_AM2;			/* 0x128 32 R/W PCI Image2 Address Mask register */
+	uint32 P_TA2;			/* 0x12C 32 R/W PCI Image2 Translation Address register */
+	uint32 P_IMG_CTRL3;		/* 0x130 32 R/W PCI Image3 Control register */
+	uint32 P_BA3;			/* 0x134 32 R/W PCI Image3 Base Address register */
+	uint32 P_AM3;			/* 0x138 32 R/W PCI Image3 Address Mask register */
+	uint32 P_TA3;			/* 0x13C 32 R/W PCI Image3 Translation Address register */
+	uint32 P_IMG_CTRL4;		/* 0x140 32 R/W PCI Image4 Control register */
+	uint32 P_BA4;			/* 0x144 32 R/W PCI Image4 Base Address register */
+	uint32 P_AM4;			/* 0x148 32 R/W PCI Image4 Address Mask register */
+	uint32 P_TA4;			/* 0x14C 32 R/W PCI Image4 Translation Address register */
+	uint32 P_IMG_CTRL5;		/* 0x150 32 R/W PCI Image5 Control register */
+	uint32 P_BA5;			/* 0x154 32 R/W PCI Image5 Base Address register */
+	uint32 P_AM5;			/* 0x158 32 R/W PCI Image5 Address Mask register */
+	uint32 P_TA5;			/* 0x15C 32 R/W PCI Image5 Translation Address register */
+	uint32 P_ERR_CS;		/* 0x160 32 R/W PCI Error Control and Status register */
+	uint32 P_ERR_ADDR;		/* 0x164 32 R PCI Erroneous Address register */
+	uint32 P_ERR_DATA;		/* 0x168 32 R PCI Erroneous Data register */
+
+	uint32 PAD[5];			/* 0x16C-0x17F PADDING */
+
+	uint32 WB_CONF_SPC_BAR;		/* 0x180 32 R WISHBONE Configuration Space Base Address */
+	uint32 W_IMG_CTRL1;		/* 0x184 32 R/W WISHBONE Image1 Control register */
+	uint32 W_BA1;			/* 0x188 32 R/W WISHBONE Image1 Base Address register */
+	uint32 W_AM1;			/* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+	uint32 W_TA1;			/* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+	uint32 W_IMG_CTRL2;		/* 0x194 32 R/W WISHBONE Image2 Control register */
+	uint32 W_BA2;			/* 0x198 32 R/W WISHBONE Image2 Base Address register */
+	uint32 W_AM2;			/* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+	uint32 W_TA2;			/* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+	uint32 W_IMG_CTRL3;		/* 0x1A4 32 R/W WISHBONE Image3 Control register */
+	uint32 W_BA3;			/* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+	uint32 W_AM3;			/* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+	uint32 W_TA3;			/* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+	uint32 W_IMG_CTRL4;		/* 0x1B4 32 R/W WISHBONE Image4 Control register */
+	uint32 W_BA4;			/* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+	uint32 W_AM4;			/* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+	uint32 W_TA4;			/* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+	uint32 W_IMG_CTRL5;		/* 0x1C4 32 R/W WISHBONE Image5 Control register */
+	uint32 W_BA5;			/* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+	uint32 W_AM5;			/* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+	uint32 W_TA5;			/* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+	uint32 W_ERR_CS;		/* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+	uint32 W_ERR_ADDR;		/* 0x1D8 32 R WISHBONE Erroneous Address register */
+	uint32 W_ERR_DATA;		/* 0x1DC 32 R WISHBONE Erroneous Data register */
+	uint32 CNF_ADDR;		/* 0x1E0 32 R/W Configuration Cycle register */
+	uint32 CNF_DATA;		/* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+	uint32 INT_ACK;			/* 0x1E8 32 R Interrupt Acknowledge register */
+	uint32 ICR;			/* 0x1EC 32 R/W Interrupt Control register */
+	uint32 ISR;			/* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN		(1 << 0)	/* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN	(1 << 1)	/* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN	(1 << 2)	/* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN	(1 << 3)	/* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN	(1 << 4)	/* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET	(1U << 31)	/* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST		(1 << 0)	/* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST	(1 << 1)	/* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST	(1 << 2)	/* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST	(1 << 3)	/* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST	(1 << 4)	/* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR		(1 << 0)	/* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR		(1 << 1)	/* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR		(1 << 2)	/* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS			(1 << 0)	/* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER		(1 << 1)	/* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT	(1 << 2)	/* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK		0x30		/* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT	4		/* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL		(1 << 3)	/* SPI Write FIFO Full */
+#define SPIH_WFEMPTY		(1 << 2)	/* SPI Write FIFO Empty */
+#define SPIH_RFFULL		(1 << 1)	/* SPI Read FIFO Full */
+#define SPIH_RFEMPTY		(1 << 0)	/* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK		(1U << 31)	/* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK		(1 << 1)	/* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED		(1 << 3)	/* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND		0xf4240		/* 1 million */
+
+#endif /* _BCM_PCI_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h
new file mode 100644
index 0000000..a3985cf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h,v 13.5 2007-09-14 22:00:59 Exp $
+ */
+/* essai */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define	BCMPERF_GETICACHE_MISS(x)	((x) = 0)
+#define	BCMPERF_GETICACHE_HIT(x)	((x) = 0)
+#define	BCMPERF_GETINSTRCOUNT(x)	((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
new file mode 100644
index 0000000..5fda5e9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -0,0 +1,120 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h,v 13.17.86.2 2010-12-23 01:13:20 Exp $
+ */
+
+#ifndef	_sdio_api_h_
+#define	_sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS                          (0x00)
+#define SDIOH_API_RC_FAIL	                      (0x01)
+#define SDIOH_API_SUCCESS(status) ((status) == 0)
+
+#define SDIOH_READ              0	/* Read request */
+#define SDIOH_WRITE             1	/* Write request */
+
+#define SDIOH_DATA_FIX          0	/* Fixed addressing */
+#define SDIOH_DATA_INC          1	/* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL   0       /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND   1       /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU  2       /* Cut-through command */
+
+#define SDIOH_DATA_PIO          0       /* PIO mode */
+#define SDIOH_DATA_DMA          1       /* DMA mode */
+
+
+typedef int SDIOH_API_RC;
+
+/* SDIO host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+/* Attach; returns a handle on success, NULL on failure.
+ *  The returned handle must be passed to all subsequent calls; no local copy
+ *  is cached.  cfghdl points to the starting address of the PCI device's
+ *  mapped memory.
+ */
+extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+	uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+	uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+	void *pkt);
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                          void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+/* Helper function */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
new file mode 100644
index 0000000..6131d8a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -0,0 +1,211 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ *     export functions to client drivers
+ *     abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h,v 13.46.52.3 2010-10-19 00:41:44 Exp $
+ */
+
+#ifndef	_bcmsdh_h_
+#define	_bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL	0x0001 /* Error */
+#define BCMSDH_INFO_VAL		0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+/* Attach and build an interface to the underlying SD host driver.
+ *  - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
+ *  - Returns the bcmsdh handle and virtual address base for register access.
+ *    The returned handle should be used in all subsequent calls, but the bcmsdh
+ *    implementation may maintain a single "default" handle (e.g. the first or
+ *    most recent one) to enable single-instance implementations to pass NULL.
+ */
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq);
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+#ifdef BCMLXSDMMC
+extern int bcmsdh_claim_host_and_lock(void *sdh);
+extern int bcmsdh_release_host_and_unlock(void *sdh);
+#endif /* BCMLXSDMMC */
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ *   fn:   function number
+ *   addr: unmodified SDIO-space address
+ *   data: data byte to write
+ *   err:  pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ *   fn:     function whose CIS is being requested (0 is common CIS)
+ *   cis:    pointer to memory location to place results
+ *   length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ *   addr: backplane address (i.e. >= regsva from attach)
+ *   size: register width in bytes (2 or 4)
+ *   data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
+
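+/*
+ * Illustrative sketch: a guarded 4-byte register read ('sdh' and 'core_addr'
+ * are hypothetical; a real caller obtains them from bcmsdh_attach() and the
+ * chip's backplane layout).
+ *
+ *	uint32 val = bcmsdh_reg_read(sdh, core_addr, 4);
+ *	if (bcmsdh_regfail(sdh))
+ *		val = 0;	(treat the read as invalid)
+ */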
+/* Buffer transfer to/from device (client) core via cmd53.
+ *   fn:       function number
+ *   addr:     backplane address (i.e. >= regsva from attach)
+ *   flags:    backplane width, address increment, sync/async
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ *   pkt:      pointer to packet associated with buf (if any)
+ *   complete: callback function for command completion (async only)
+ *   handle:   handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete, void *handle);
+
+/* Flags bits */
+#define SDIO_REQ_4BYTE	0x1	/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED	0x2	/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC	0x4	/* Async request (vs. sync request) */
+
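+/*
+ * Illustrative sketch: a synchronous, incrementing-address, 4-byte-wide
+ * receive into a plain buffer (no packet, no async completion).  'sdh',
+ * 'addr' and the function number used here are hypothetical placeholders.
+ *
+ *	uint8 buf[64];
+ *	int err = bcmsdh_recv_buf(sdh, addr, 2, SDIO_REQ_4BYTE,
+ *	                          buf, sizeof(buf), NULL, NULL, NULL);
+ */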
+/* Pending (non-error) return code */
+#define BCME_PENDING	1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ *   rw:       read or write (0/1)
+ *   addr:     direct SDIO address
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+                           void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+/* callback functions */
+typedef struct {
+	/* attach to device */
+	void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+	                uint16 func, uint bustype, void * regsva, osl_t * osh,
+	                void * param);
+	/* detach from device */
+	void (*detach)(void *ch);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_register_oob_intr(void * dhdp);
+extern void bcmsdh_unregister_oob_intr(void);
+extern void bcmsdh_oob_intr_set(bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+#endif	/* _bcmsdh_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
new file mode 100644
index 0000000..d188c4e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -0,0 +1,122 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h,v 13.5.88.1 2010-12-23 01:13:20 Exp $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+
+/* Allocate/init/free per-OS private data */
+extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
+extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d\n", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4		2
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+struct sdioh_info {
+	osl_t 		*osh;			/* osh handler */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint 		irq;			/* Client irq */
+	int 		intrcount;		/* Client interrupts */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	uint		max_dma_len;
+	uint		max_dma_descriptors;	/* DMA Descriptors supported by this controller. */
+//	SDDMA_DESCRIPTOR	SGList[32];	/* Scatter/Gather DMA List */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+	sdioh_info_t	*sd;
+	struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
new file mode 100644
index 0000000..ee29b5c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -0,0 +1,274 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h,v 13.4.90.2 2010-05-12 04:14:25 Exp $
+ */
+
+#ifndef	_bcmsdpcm_h_
+#define	_bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK	I_SMB_SW0	/* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK	I_SMB_SW1	/* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB	I_SMB_SW2	/* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT	I_SMB_SW3	/* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL      (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK		(1 << 0)	/* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK	(1 << 1)	/* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB	(1 << 2)	/* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT	(1 << 3)	/* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK	0x0000000f	/* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK	0x00ff0000	/* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT	16		/* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE	I_HMB_SW0	/* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE	I_HMB_SW1	/* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND	I_HMB_SW2	/* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT	I_HMB_SW3	/* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL    (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON	(1 << 0)	/* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE	(1 << 1)	/* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND	(1 << 2)	/* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT	(1 << 3)	/* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK	0x0000000f	/* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED	0x01	/* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY	0x02	/* we're ready to talk to host after enable */
+#define HMB_DATA_FC		0x04	/* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY	0x08	/* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT		0x10	/* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK	0xff000000	/* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT	24		/* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK	0x00ff0000	/* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT	16		/* device protocol version (with devready) */
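+
+/*
+ * Usage sketch (illustrative, not part of the original interface notes):
+ * assuming 'hmbdata' holds a tohostmailboxdata word read by the host, the
+ * fields above would typically be unpacked as:
+ *
+ *	uint8 fcbits  = (hmbdata & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+ *	uint8 protver = (hmbdata & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ */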
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK		0x000000ff	/* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK		0x00000f00	/* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT		8		/* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK		0x0000f000	/* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT		12		/* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK		0x00ff0000	/* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT		16		/* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET		2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET		3		/* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) 		(((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK		0xff000000
+#define SDPCM_DOFFSET_SHIFT		24
+
+#define SDPCM_FCMASK_OFFSET		4		/* Flow control */
+#define SDPCM_FCMASK_VALUE(p)		(((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET		5		/* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p)		(((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET		6		/* Version # */
+#define SDPCM_VERSION_VALUE(p)		(((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET		7		/* Spare */
+#define SDPCM_UNUSED_VALUE(p)		(((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN	8	/* SW header is 64 bits */
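+
+/*
+ * Usage sketch (illustrative): with 'p' pointing at the start of the SW
+ * header of a received frame, the accessors above are typically used as:
+ *
+ *	uint8 seq     = SDPCM_PACKET_SEQUENCE(p);
+ *	uint8 chan    = SDPCM_PACKET_CHANNEL(p);
+ *	uint  nextlen = SDPCM_NEXTLEN_VALUE(p);	(length of next frame, in bytes)
+ *	uint8 doff    = SDPCM_DOFFSET_VALUE(p);	(offset of payload from SOF)
+ */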
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL	0	/* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL	1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL	3	/* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL	15	/* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL	15
+
+#define SDPCM_SEQUENCE_WRAP	256	/* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0	0x01
+#define SDPCM_FLAG_RESVD1	0x02
+#define SDPCM_FLAG_GSPI_TXENAB	0x04
+#define SDPCM_FLAG_GLOMDESC	0x08	/* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG	(SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p)	(((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN	4	/* Generally: Cmd(1), Ext(1), Len(2);
+					 * Semantics of Ext byte depend on command.
+					 * Len is current or requested frame length, not
+					 * including test header; sent little-endian.
+					 */
+#define SDPCM_TEST_DISCARD	0x01	/* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ	0x02	/* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP	0x03	/* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST	0x04	/* Receiver to send a burst. Ext is a frame count */
+#define SDPCM_TEST_SEND		0x05	/* Receiver sets send mode. Ext is boolean on/off */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id)	((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+	uint32 cmd52rd;		/* Cmd52RdCount, SDIO: cmd52 reads */
+	uint32 cmd52wr;		/* Cmd52WrCount, SDIO: cmd52 writes */
+	uint32 cmd53rd;		/* Cmd53RdCount, SDIO: cmd53 reads */
+	uint32 cmd53wr;		/* Cmd53WrCount, SDIO: cmd53 writes */
+	uint32 abort;		/* AbortCount, SDIO: aborts */
+	uint32 datacrcerror;	/* DataCrcErrorCount, SDIO: frames w/CRC error */
+	uint32 rdoutofsync;	/* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+	uint32 writebusy;	/* WriteBusyCount, SDIO: device asserted "busy" */
+	uint32 readwait;	/* ReadWaitCount, SDIO: no data ready for a read cmd */
+	uint32 readterm;	/* ReadTermCount, SDIO: read frame termination cmds */
+	uint32 writeterm;	/* WriteTermCount, SDIO: write frames termination cmds */
+	uint32 rxdescuflo;	/* receive descriptor underflows */
+	uint32 rxfifooflo;	/* receive fifo overflows */
+	uint32 txfifouflo;	/* transmit fifo underflows */
+	uint32 runt;		/* runt (too short) frames recv'd from bus */
+	uint32 badlen;		/* frame's rxh len does not match its hw tag len */
+	uint32 badcksum;	/* frame's hw tag chksum doesn't agree with len value */
+	uint32 seqbreak;	/* break in sequence # space from one rx frame to the next */
+	uint32 rxfcrc;		/* frame rx header indicates crc error */
+	uint32 rxfwoos;		/* frame rx header indicates write out of sync */
+	uint32 rxfwft;		/* frame rx header indicates write frame termination */
+	uint32 rxfabort;	/* frame rx header indicates frame aborted */
+	uint32 woosint;		/* write out of sync interrupt */
+	uint32 roosint;		/* read out of sync interrupt */
+	uint32 rftermint;	/* read frame terminate interrupt */
+	uint32 wftermint;	/* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val)	((var) == (val))
+#define SDIODREV_GE(var, val)	((var) >= (val))
+#define SDIODREV_GT(var, val)	((var) > (val))
+#define SDIODREV_LT(var, val)	((var) < (val))
+#define SDIODREV_LE(var, val)	((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+	(SDIODREV_LT((h)->corerev, 1) ? \
+	 SDIODDMAREG32((h), (dir), (chnl)) : \
+	 SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODDMAREG(h, dir, chnl) : \
+	 PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+	(SDIODREV_LT((corerev), 1) ? \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+	((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODFIFOREG(h, corerev) : \
+	 PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION       0x0001
+#define SDPCM_SHARED_VERSION_MASK  0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT  0x0100
+#define SDPCM_SHARED_ASSERT        0x0200
+#define SDPCM_SHARED_TRAP          0x0400
+#define SDPCM_SHARED_IN_BRPT       0x0800
+#define SDPCM_SHARED_SET_BRPT      0x1000
+#define SDPCM_SHARED_PENDING_BRPT  0x2000
+
+typedef struct {
+	uint32	flags;
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hndrte_cons_t */
+	uint32  msgtrace_addr;
+	uint32  brpt_addr;
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
+
+/* Function can be used to notify host of FW halt */
+extern void sdpcmd_fwhalt(void);
+
+#endif	/* _bcmsdpcm_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
new file mode 100644
index 0000000..0bff355
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -0,0 +1,135 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h,v 13.11.86.1 2010-11-15 18:14:56 Exp $
+ */
+#ifndef	_BCM_SD_SPI_H
+#define	_BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint		bar0;			/* BAR0 for PCI Device */
+	osl_t 		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+
+	uint		lockcount; 		/* nest count of sdspi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	uint32 		intrcount;		/* Client interrupts */
+	uint32 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	bool		got_hcint;		/* Host Controller interrupt. */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current register transfer size */
+	uint32		cmd53_wr_data;		/* Used to pass CMD53 write data */
+	uint32		card_response;		/* Used to pass back response status byte */
+	uint32		card_rsp_data;		/* Used to pass back response data word */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
new file mode 100644
index 0000000..0f4c026
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -0,0 +1,267 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h,v 13.21.2.6 2010-11-15 18:14:01 Exp $
+ */
+#ifndef	_BCM_SD_STD_H
+#define	_BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
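+
+/*
+ * Note (illustrative; the variable names are made up): these trace macros
+ * take a fully parenthesized printf-style argument list, so callers use
+ * double parentheses, e.g.:
+ *
+ *	sd_err(("%s: CMD%d failed, status 0x%x\n", __FUNCTION__, cmd, status));
+ */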
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+#define SDIOH_MODE_SD1		1
+#define SDIOH_MODE_SD4		2
+
+#define MAX_SLOTS 6 	/* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ	0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK	1
+#define SDIOH_TYPE_BCM27XX	2
+#define SDIOH_TYPE_TI_PCIXX21	4	/* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822	5	/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON	6	/* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS   0x00001E00
+
+#define RETRIES_LARGE 100000
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+#define USE_FIFO		0x8	/* Fifo vs non-fifo */
+
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+#define HC_INTR_RETUNING	0x1000
+
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint32 curr_caps;                    	/* max current capabilities reg */
+
+	osl_t 		*osh;			/* osh handler */
+	volatile char 	*mem_space;		/* pci device memory va */
+	uint		lockcount; 		/* nest count of sdstd_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint		target_dev;		/* Target device ID */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	int 		intrcount;		/* Client interrupts */
+	int 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current transfer */
+	uint16 		card_rca;		/* Current Address */
+	int8		sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;		/* DMA Buffer virtual address */
+	ulong		dma_phys;		/* DMA Buffer physical address */
+	void		*adma2_dscr_buf;	/* ADMA2 Descriptor Buffer virtual address */
+	ulong		adma2_dscr_phys;	/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void		*dma_start_buf;
+	ulong		dma_start_phys;
+	uint		alloced_dma_size;
+	void		*adma2_dscr_start_buf;
+	ulong		adma2_dscr_start_phys;
+	uint		alloced_adma2_dscr_size;
+
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	bool		got_hcint;		/* local interrupt flag */
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int 	host_UHSISupported;		/* whether UHSI is supported by the host controller */
+	int 	card_UHSI_voltage_Supported; 	/* whether UHSI is supported by the card,
+						 * in terms of voltage (1.8 or 3.3)
+						 */
+	int	global_UHSI_Supp;	/* type of UHSI support in both host and card.
+					 * HOST_SDR_UNSUPP: capabilities not supported/matched
+					 * HOST_SDR_12_25: SDR12 and SDR25 supported
+					 * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+					 */
+	int	sd3_dat_state; 		/* data transfer state used for retuning check */
+	int	sd3_tun_state; 		/* tuning state used for retuning check */
+	bool	sd3_tuning_reqd; 	/* tuning requirement parameter */
+	uint32	caps3;			/* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+};
+
+#define DMA_MODE_NONE	0
+#define DMA_MODE_SDMA	1
+#define DMA_MODE_ADMA1	2
+#define DMA_MODE_ADMA2	3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO	-1
+
+#define USE_DMA(sd)		((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE			0
+#define SDIOH_ADMA1_MODE		1
+#define SDIOH_ADMA2_MODE		2
+#define SDIOH_ADMA2_64_MODE		3
+
+#define ADMA2_ATTRIBUTE_VALID		(1 << 0)	/* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END			(1 << 1)	/* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT			(1 << 2)	/* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP		(0 << 4)	/* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV		(1 << 4)	/* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET		(1 << 4)	/* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN	(2 << 4)	/* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK	(3 << 4)	/* Link Descriptor */
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 			0
+#define TUNING_START 			1
+#define TUNING_START_AFTER_DAT 	2
+#define TUNING_ONGOING 			3
+
+#define DATA_TRANSFER_IDLE 		0
+#define DATA_TRANSFER_ONGOING	1
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+	uint32 len_attr;
+	uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+	uint32 phys_addr_attr;
+} adma1_dscr_t;
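+
+/*
+ * Illustrative sketch only: the length/attribute packing below follows the
+ * SD Host Controller ADMA2 layout (length in the upper 16 bits), which is
+ * not spelled out in this header. A single-entry table describing 'len'
+ * bytes at physical address 'pa' would look roughly like:
+ *
+ *	adma2_dscr_32b_t *d = (adma2_dscr_32b_t *)sd->adma2_dscr_buf;
+ *	d->phys_addr = (uint32)pa;
+ *	d->len_attr  = ((uint32)len << 16) | ADMA2_ATTRIBUTE_ACT_TRAN |
+ *	               ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END;
+ */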
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_STD_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h
new file mode 100644
index 0000000..0eb2a30
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -0,0 +1,40 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h,v 13.5.112.1 2010-11-15 18:13:09 Exp $
+ */
+#ifndef	_BCM_SPI_H
+#define	_BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h
new file mode 100644
index 0000000..f7f2300
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -0,0 +1,706 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmutils.h,v 13.236.2.16 2011-01-26 00:45:06 Exp $
+ */
+
+
+#ifndef	_bcmutils_h_
+#define	_bcmutils_h_
+
+#define bcm_strcpy_s(dst, noOfElements, src)            strcpy((dst), (src))
+#define bcm_strncpy_s(dst, noOfElements, src, count)    strncpy((dst), (src), (count))
+#define bcm_strcat_s(dst, noOfElements, src)            strcat((dst), (src))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define _BCM_U	0x01	/* upper case letter */
+#define _BCM_L	0x02	/* lower case letter */
+#define _BCM_D	0x04	/* decimal digit */
+#define _BCM_C	0x08	/* control character */
+#define _BCM_P	0x10	/* punctuation */
+#define _BCM_S	0x20	/* white space (space/lf/tab) */
+#define _BCM_X	0x40	/* hex digit */
+#define _BCM_SP	0x80	/* hard space (0x20) */
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x)	(bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c)	((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c)	((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c)	((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c)	((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c)	((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c)	((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c)	((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c)	(bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c)	(bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+
+
+struct bcmstrbuf {
+	char *buf;		/* pointer to current position in origbuf */
+	unsigned int size;	/* current (residual) size of the buffer */
+	char *origbuf;		/* unmodified pointer to the original buffer */
+	unsigned int origsize;	/* unmodified original buffer size */
+};
+
+
+#ifdef BCMDRIVER
+#include <osl.h>
+
+#define GPIO_PIN_NOTDEFINED 	0x20	
+
+
+#define SPINWAIT(exp, us) { \
+	uint countdown = (us) + 9; \
+	while ((exp) && (countdown >= 10)) {\
+		OSL_DELAY(10); \
+		countdown -= 10; \
+	} \
+}
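+
+/*
+ * Usage sketch (illustrative; the register-read expression and busy bit are
+ * made up): poll until a condition clears or roughly 'us' microseconds pass:
+ *
+ *	SPINWAIT((R_REG(osh, &regs->status) & STATUS_BUSY), 1000);
+ */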
+
+
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT        128	
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC           16	
+#endif
+
+typedef struct pktq_prec {
+	void *head;     /* first packet to dequeue */
+	void *tail;     /* last packet to dequeue */
+	uint16 len;     /* number of queued packets */
+	uint16 max;     /* maximum number of queued packets */
+} pktq_prec_t;
+
+
+
+struct pktq {
+	uint16 num_prec;        /* number of precedences in use */
+	uint16 hi_prec;         /* rapid dequeue hint (>= highest non-empty prec) */
+	uint16 max;             /* total max packets */
+	uint16 len;             /* total number of packets */
+	
+	struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+
+struct spktq {
+	uint16 num_prec;        
+	uint16 hi_prec;         
+	uint16 max;             
+	uint16 len;             
+	
+	struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec)        for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool)		((pool) && (pool)->inited)
+#if defined(BCM4329C0)
+#define SHARED_POOL		(pktpool_shared_ptr)
+#else
+#define SHARED_POOL		(pktpool_shared)
+#endif 
+#else 
+#define POOL_ENAB(bus)		0
+#define SHARED_POOL		((struct pktpool *)NULL)
+#endif 
+
+#define PKTPOOL_LEN_MAX		40
+#define PKTPOOL_CB_MAX		3
+
+struct pktpool;
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+	pktpool_cb_t cb;
+	void *arg;
+} pktpool_cbinfo_t;
+
+#ifdef BCMDBG_POOL
+
+#define POOL_IDLE	0
+#define POOL_RXFILL	1
+#define POOL_RXDH	2
+#define POOL_RXD11	3
+#define POOL_TXDH	4
+#define POOL_TXD11	5
+#define POOL_AMPDU	6
+#define POOL_TXENQ	7
+
+typedef struct {
+	void *p;
+	uint32 cycles;
+	uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+	uint8 txdh;	
+	uint8 txd11;	
+	uint8 enq;	
+	uint8 rxdh;	
+	uint8 rxd11;	
+	uint8 rxfill;	
+	uint8 idle;	
+} pktpool_stats_t;
+#endif 
+
+typedef struct pktpool {
+	bool inited;
+	uint16 r;
+	uint16 w;
+	uint16 len;
+	uint16 maxlen;
+	uint16 plen;
+	bool istx;
+	bool empty;
+	uint8 cbtoggle;
+	uint8 cbcnt;
+	uint8 ecbcnt;
+	bool emptycb_disable;
+	pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+	pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+	void *q[PKTPOOL_LEN_MAX + 1];
+
+#ifdef BCMDBG_POOL
+	uint8 dbg_cbcnt;
+	pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+	uint16 dbg_qlen;
+	pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+} pktpool_t;
+
+#if defined(BCM4329C0)
+extern pktpool_t *pktpool_shared_ptr;
+#else
+extern pktpool_t *pktpool_shared;
+#endif 
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern uint16 pktpool_avail(pktpool_t *pktp);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+
+#define POOLPTR(pp)			((pktpool_t *)(pp))
+#define pktpool_len(pp)			(POOLPTR(pp)->len - 1)
+#define pktpool_plen(pp)		(POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp)		(POOLPTR(pp)->maxlen)
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif 
+
+
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+
+
+#define pktq_psetmax(pq, prec, _max)    ((pq)->q[prec].max = (_max))
+#define pktq_plen(pq, prec)             ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec)           ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec)            ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec)           ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec)            ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec)       ((pq)->q[prec].tail)
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+	ifpkt_cb_t fn, int arg);
+
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+
+
+#define pktq_len(pq)                    ((int)(pq)->len)
+#define pktq_max(pq)                    ((int)(pq)->max)
+#define pktq_avail(pq)                  ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq)                   ((pq)->len >= (pq)->max)
+#define pktq_empty(pq)                  ((pq)->len == 0)
+
+
+#define pktenq(pq, p)		pktq_penq(((struct pktq *)pq), 0, (p))
+#define pktenq_head(pq, p)	pktq_penq_head(((struct pktq *)pq), 0, (p))
+#define pktdeq(pq)		pktq_pdeq(((struct pktq *)pq), 0)
+#define pktdeq_tail(pq)		pktq_pdeq_tail(((struct pktq *)pq), 0)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
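+
+/*
+ * Usage sketch (illustrative; PKTFREE is assumed to come from osl.h): a
+ * simple single-precedence queue built on the wrappers above:
+ *
+ *	struct pktq q;
+ *	pktqinit(&q, PKTQ_LEN_DEFAULT);
+ *	pktenq(&q, p);
+ *	...
+ *	while ((p = pktdeq(&q)) != NULL)
+ *		PKTFREE(osh, p, FALSE);
+ */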
+
+
+
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+#define	PKTPRIO_VDSCP	0x100		
+#define	PKTPRIO_VLAN	0x200		
+#define	PKTPRIO_UPD	0x400		
+#define	PKTPRIO_DSCP	0x800		
+
+
+extern int bcm_atoi(char *s);
+extern ulong bcm_strtoul(char *cp, char **endp, uint base);
+extern char *bcmstrstr(char *haystack, char *needle);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(char *p, struct ether_addr *ea);
+
+
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+
+
+extern void bcm_mdelay(uint ms);
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define	bcmlog(fmt, a1, a2)
+#define	bcmdumplog(buf, size)	*buf = '\0'
+#define	bcmdumplogent(buf, idx)	-1
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+
+
+
+typedef struct bcm_iovar {
+	const char *name;	/* name for lookup and display */
+	uint16 varid;		/* id for the handler's switch */
+	uint16 flags;		/* driver-specific flag bits */
+	uint16 type;		/* base type of argument */
+	uint16 minlen;		/* min length for buffer vars */
+} bcm_iovar_t;
+
+
+
+
+#define IOV_GET 0 /* look up and return the current value */
+#define IOV_SET 1 /* set a new value */
+
+
+#define IOV_GVAL(id)		((id)*2)
+#define IOV_SVAL(id)		(((id)*2)+IOV_SET)
+#define IOV_ISSET(actionid)	((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid)	(actionid >> 1)
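+
+/*
+ * Usage sketch (illustrative; IOV_MSGLEVEL and the local variables are
+ * hypothetical): an iovar handler typically switches on the actionid built
+ * from these macros:
+ *
+ *	switch (actionid) {
+ *	case IOV_GVAL(IOV_MSGLEVEL):
+ *		int_val = (int32)msglevel;
+ *		bcopy(&int_val, arg, sizeof(int_val));
+ *		break;
+ *	case IOV_SVAL(IOV_MSGLEVEL):
+ *		msglevel = (uint)int_val;
+ *		break;
+ *	}
+ */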
+
+
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif 
+#endif	
+
+
+#define IOVT_VOID	0	
+#define IOVT_BOOL	1	
+#define IOVT_INT8	2	
+#define IOVT_UINT8	3	
+#define IOVT_INT16	4	
+#define IOVT_UINT16	5	
+#define IOVT_INT32	6	
+#define IOVT_UINT32	7	
+#define IOVT_BUFFER	8	
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+
+#define BCM_IOV_TYPE_INIT { \
+	"void", \
+	"bool", \
+	"int8", \
+	"uint8", \
+	"int16", \
+	"uint16", \
+	"int32", \
+	"uint32", \
+	"buffer", \
+	"" }
+
+#define BCM_IOVT_IS_INT(type) (\
+	(type == IOVT_BOOL) || \
+	(type == IOVT_INT8) || \
+	(type == IOVT_UINT8) || \
+	(type == IOVT_INT16) || \
+	(type == IOVT_UINT16) || \
+	(type == IOVT_INT32) || \
+	(type == IOVT_UINT32))
+
+
+
+#define BCME_STRLEN 		64	
+#define VALID_BCMERROR(e)  ((e <= 0) && (e >= BCME_LAST))
+
+
+
+
+#define BCME_OK				0	
+#define BCME_ERROR			-1	
+#define BCME_BADARG			-2	
+#define BCME_BADOPTION			-3	
+#define BCME_NOTUP			-4	
+#define BCME_NOTDOWN			-5	
+#define BCME_NOTAP			-6	
+#define BCME_NOTSTA			-7	
+#define BCME_BADKEYIDX			-8	
+#define BCME_RADIOOFF 			-9	
+#define BCME_NOTBANDLOCKED		-10	
+#define BCME_NOCLK			-11	
+#define BCME_BADRATESET			-12	
+#define BCME_BADBAND			-13	
+#define BCME_BUFTOOSHORT		-14	
+#define BCME_BUFTOOLONG			-15	
+#define BCME_BUSY			-16	
+#define BCME_NOTASSOCIATED		-17	
+#define BCME_BADSSIDLEN			-18	
+#define BCME_OUTOFRANGECHAN		-19	
+#define BCME_BADCHAN			-20	
+#define BCME_BADADDR			-21	
+#define BCME_NORESOURCE			-22	
+#define BCME_UNSUPPORTED		-23	
+#define BCME_BADLEN			-24	
+#define BCME_NOTREADY			-25	
+#define BCME_EPERM			-26	
+#define BCME_NOMEM			-27	
+#define BCME_ASSOCIATED			-28	
+#define BCME_RANGE			-29	
+#define BCME_NOTFOUND			-30	
+#define BCME_WME_NOT_ENABLED		-31	
+#define BCME_TSPEC_NOTFOUND		-32	
+#define BCME_ACM_NOTSUPPORTED		-33	
+#define BCME_NOT_WME_ASSOCIATION	-34	
+#define BCME_SDIO_ERROR			-35	
+#define BCME_DONGLE_DOWN		-36	
+#define BCME_VERSION			-37 	
+#define BCME_TXFAIL			-38 	
+#define BCME_RXFAIL			-39	
+#define BCME_NODEVICE			-40 	
+#define BCME_NMODE_DISABLED		-41 	
+#define BCME_NONRESIDENT		-42 
+#define BCME_LAST			BCME_NONRESIDENT
+
+
+#define BCMERRSTRINGTABLE {		\
+	"OK",				\
+	"Undefined error",		\
+	"Bad Argument",			\
+	"Bad Option",			\
+	"Not up",			\
+	"Not down",			\
+	"Not AP",			\
+	"Not STA",			\
+	"Bad Key Index",		\
+	"Radio Off",			\
+	"Not band locked",		\
+	"No clock",			\
+	"Bad Rate valueset",		\
+	"Bad Band",			\
+	"Buffer too short",		\
+	"Buffer too long",		\
+	"Busy",				\
+	"Not Associated",		\
+	"Bad SSID len",			\
+	"Out of Range Channel",		\
+	"Bad Channel",			\
+	"Bad Address",			\
+	"Not Enough Resources",		\
+	"Unsupported",			\
+	"Bad length",			\
+	"Not Ready",			\
+	"Not Permitted",		\
+	"No Memory",			\
+	"Associated",			\
+	"Not In Range",			\
+	"Not Found",			\
+	"WME Not Enabled",		\
+	"TSPEC Not Found",		\
+	"ACM Not Supported",		\
+	"Not WME Association",		\
+	"SDIO Bus Error",		\
+	"Dongle Not Accessible",	\
+	"Incorrect version",		\
+	"TX Failure",			\
+	"RX Failure",			\
+	"Device Not Present",		\
+	"NMODE Disabled",		\
+	"Nonresident overlay access", \
+}
+
+#ifndef ABS
+#define	ABS(a)			(((a) < 0)?-(a):(a))
+#endif 
+
+#ifndef MIN
+#define	MIN(a, b)		(((a) < (b))?(a):(b))
+#endif 
+
+#ifndef MAX
+#define	MAX(a, b)		(((a) > (b))?(a):(b))
+#endif 
+
+#define CEIL(x, y)		(((x) + ((y)-1)) / (y))
+#define	ROUNDUP(x, y)		((((x)+((y)-1))/(y))*(y))
+#define	ISALIGNED(a, x)		(((uintptr)(a) & ((x)-1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define	ISPOWEROF2(x)		((((x)-1)&(x)) == 0)
+#define VALID_MASK(mask)	!((mask) & ((mask) + 1))
+#ifndef OFFSETOF
+#define	OFFSETOF(type, member)	((uint)(uintptr)&((type *)0)->member)
+#endif 
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a)		(sizeof(a)/sizeof(a[0]))
+#endif
+
+
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f)	(_bcmutils_dummy_fn = (void *)(f))
+
+
+#ifndef setbit
+#ifndef NBBY		      
+#define	NBBY	8	
+#endif 
+#define	setbit(a, i)	(((uint8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
+#define	clrbit(a, i)	(((uint8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define	isset(a, i)	(((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
+#define	isclr(a, i)	((((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+#endif 
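+
+/*
+ * Usage sketch (illustrative; NUMBITS is a made-up size): marking and
+ * testing entries in a small bitmap:
+ *
+ *	uint8 map[CEIL(NUMBITS, NBBY)];
+ *	bzero(map, sizeof(map));
+ *	setbit(map, 5);
+ *	if (isset(map, 5))
+ *		...
+ */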
+
+#define	NBITS(type)	(sizeof(type) * 8)
+#define NBITVAL(nbits)	(1 << (nbits))
+#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
+#define	NBITMASK(nbits)	MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
+
+
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+
+#define MODADD(x, y, bound) \
+    MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+    MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
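+
+/*
+ * Usage sketch (illustrative; 'widx' and RING_SIZE are made up): advancing
+ * a ring-buffer index with the modular helpers above:
+ *
+ *	widx = MODINC(widx, RING_SIZE);
+ *	widx = MODINC_POW2(widx, RING_SIZE);	(when RING_SIZE is a power of 2)
+ */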
+
+
+#define CRC8_INIT_VALUE  0xff		
+#define CRC8_GOOD_VALUE  0x9f		
+#define CRC16_INIT_VALUE 0xffff		
+#define CRC16_GOOD_VALUE 0xf0b8		
+#define CRC32_INIT_VALUE 0xffffffff	
+#define CRC32_GOOD_VALUE 0xdebb20e3	
+
+
+typedef struct bcm_bit_desc {
+	uint32	bit;
+	const char* name;
+} bcm_bit_desc_t;
+
+
+typedef struct bcm_tlv {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} bcm_tlv_t;
+
+
+#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
+
+
+#define ETHER_ADDR_STR_LEN	18	
+
+
+
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+	if (
+#ifdef __i386__
+	    1 ||
+#endif
+	    (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+		/* src and dst are 32-bit aligned: XOR as four 32-bit words */
+		((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+		((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+		((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+		((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+	} else {
+		/* unaligned: fall back to a byte-at-a-time XOR */
+		int k;
+		for (k = 0; k < 16; k++)
+			dst[k] = src1[k] ^ src2[k];
+	}
+}
+
+
+
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
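+
+/*
+ * Usage sketch (illustrative): a CRC-8 over 'buf'/'len' starts from
+ * CRC8_INIT_VALUE; when the buffer also includes the appended CRC byte, a
+ * good frame typically leaves the residue CRC8_GOOD_VALUE:
+ *
+ *	uint8 crc = hndcrc8(buf, len, CRC8_INIT_VALUE);
+ *	bool ok   = (hndcrc8(buf, len + 1, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE);
+ */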
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC)
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE)
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+#endif
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+
+extern const char *bcmerrorstr(int bcmerror);
+
+
+typedef uint32 mbool;
+#define mboolset(mb, bit)		((mb) |= (bit))		
+#define mboolclr(mb, bit)		((mb) &= ~(bit))	
+#define mboolisset(mb, bit)		(((mb) & (bit)) != 0)	
+#define	mboolmaskset(mb, mask, val)	((mb) = (((mb) & ~(mask)) | (val)))
+
+
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+
+
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 	offset;
+	uint32 	len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(char *name, const uchar *cdata, int len);
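+
+/*
+ * Usage sketch (illustrative; 'buf' and 'counter' are made up): the
+ * bcmstrbuf helpers accumulate formatted text into a caller-supplied buffer:
+ *
+ *	struct bcmstrbuf b;
+ *	bcm_binit(&b, buf, sizeof(buf));
+ *	bcm_bprintf(&b, "counter=%u\n", counter);
+ */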
+
+typedef  uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+                          char *buf, uint32 bufsize);
+
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
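+
+/*
+ * Usage sketch (illustrative; "mpc" is just an example name): bcm_mkiovar
+ * packs a buffer of the form "name\0<data>" and returns its total length:
+ *
+ *	char buf[32];
+ *	uint32 val = 1;
+ *	uint len = bcm_mkiovar("mpc", (char *)&val, sizeof(val), buf, sizeof(buf));
+ */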
+
+
+
+#define SSID_FMT_BUF_LEN	((4 * DOT11_MAX_SSID_LEN) + 1)
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/bcmwifi.h b/drivers/net/wireless/bcmdhd/include/bcmwifi.h
new file mode 100644
index 0000000..45f3c03
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmwifi.h
@@ -0,0 +1,165 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * the wl driver, tools and apps.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi.h,v 1.29.6.3 2010-08-03 17:47:04 Exp $
+ */
+
+
+#ifndef	_bcmwifi_h_
+#define	_bcmwifi_h_
+
+
+
+typedef uint16 chanspec_t;
+
+
+#define CH_UPPER_SB			0x01
+#define CH_LOWER_SB			0x02
+#define CH_EWA_VALID			0x04
+#define CH_20MHZ_APART			4
+#define CH_10MHZ_APART			2
+#define CH_5MHZ_APART			1	
+#define CH_MAX_2G_CHANNEL		14	
+#define WLC_MAX_2G_CHANNEL		CH_MAX_2G_CHANNEL 
+#define	MAXCHANNEL		224	
+
+#define WL_CHANSPEC_CHAN_MASK		0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT		0
+
+#define WL_CHANSPEC_CTL_SB_MASK		0x0300
+#define WL_CHANSPEC_CTL_SB_SHIFT	     8
+#define WL_CHANSPEC_CTL_SB_LOWER	0x0100
+#define WL_CHANSPEC_CTL_SB_UPPER	0x0200
+#define WL_CHANSPEC_CTL_SB_NONE		0x0300
+
+#define WL_CHANSPEC_BW_MASK		0x0C00
+#define WL_CHANSPEC_BW_SHIFT		    10
+#define WL_CHANSPEC_BW_10		0x0400
+#define WL_CHANSPEC_BW_20		0x0800
+#define WL_CHANSPEC_BW_40		0x0C00
+
+#define WL_CHANSPEC_BAND_MASK		0xf000
+#define WL_CHANSPEC_BAND_SHIFT		12
+#define WL_CHANSPEC_BAND_5G		0x1000
+#define WL_CHANSPEC_BAND_2G		0x2000
+#define INVCHANSPEC			255
+
+
+#define WF_CHAN_FACTOR_2_4_G		4814	
+#define WF_CHAN_FACTOR_5_G		10000	
+#define WF_CHAN_FACTOR_4_G		8000	
+
+
+#define LOWER_20_SB(channel)	(((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel)	(((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+				((channel) + CH_10MHZ_APART) : 0)
+#define CHSPEC_WLCBANDUNIT(chspec)	(CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel)	(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+				WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+				WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel)	(((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+					((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+					((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+					WL_CHANSPEC_BAND_5G))
+#define CHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_BAND(chspec)	((chspec) & WL_CHANSPEC_BAND_MASK)
+
+
+#define CHSPEC_CTL_SB(chspec)  (chspec & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec)      (chspec & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec)	0
+#define CHSPEC_IS20(chspec)	1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	0
+#endif
+
+#else 
+
+#define CHSPEC_IS10(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+
+#endif 
+
+#define CHSPEC_IS20_UNCOND(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+
+#define CHSPEC_IS5G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_NONE(chspec)	(((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
+#define CHSPEC_SB_UPPER(chspec)	(((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
+#define CHSPEC_SB_LOWER(chspec)	(((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
+#define CHSPEC_CTL_CHAN(chspec)  ((CHSPEC_SB_LOWER(chspec)) ? \
+				  (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
+				  (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
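+
+/*
+ * Usage sketch (illustrative): building and inspecting a 20 MHz chanspec
+ * for 2.4 GHz channel 6:
+ *
+ *	chanspec_t cs = CH20MHZ_CHSPEC(6);
+ *	uint8 chan = CHSPEC_CHANNEL(cs);	(yields 6)
+ *	bool is2g  = CHSPEC_IS2G(cs);		(TRUE for channel 6)
+ */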
+
+#define CHANSPEC_STR_LEN    8
+
+
+#define WLC_MAXRATE	108	
+#define WLC_RATE_1M	2	
+#define WLC_RATE_2M	4	
+#define WLC_RATE_5M5	11	
+#define WLC_RATE_11M	22	
+#define WLC_RATE_6M	12	
+#define WLC_RATE_9M	18	
+#define WLC_RATE_12M	24	
+#define WLC_RATE_18M	36	
+#define WLC_RATE_24M	48	
+#define WLC_RATE_36M	72	
+#define WLC_RATE_48M	96	
+#define WLC_RATE_54M	108	
+
+#define WLC_2G_25MHZ_OFFSET		5	
+
+
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+
+extern chanspec_t wf_chspec_aton(char *a);
+
+
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+
+extern int wf_channel2mhz(uint channel, uint start_factor);
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
new file mode 100644
index 0000000..9661dac
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -0,0 +1,129 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h,v 13.11.10.1 2010-12-22 23:47:26 Exp $
+ */
+
+#ifndef _dhdioctl_h_
+#define	_dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	bool set;	/* get or set request (optional) */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+	uint driver;	/* to identify target driver */
+} dhd_ioctl_t;
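+
+/*
+ * Illustrative usage sketch (editor's addition): how a user-space tool
+ * might fill in dhd_ioctl_t to read an iovar.  The transport that hands
+ * the structure to the driver (e.g. a private net-device ioctl) is
+ * assumed and not shown here.
+ *
+ *	char buf[DHD_IOCTL_SMLEN] = "version";	// iovar name, NUL terminated
+ *	dhd_ioctl_t ioc;
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.cmd    = DHD_GET_VAR;
+ *	ioc.buf    = buf;
+ *	ioc.len    = sizeof(buf);
+ *	ioc.set    = FALSE;
+ *	ioc.driver = DHD_IOCTL_MAGIC;
+ *	// pass &ioc to the driver; the reply string is returned in buf
+ */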
+
+/* Underlying BUS definition */
+enum {
+	BUS_TYPE_USB = 0, /* for USB dongles */
+	BUS_TYPE_SDIO /* for SDIO dongles */
+};
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC		0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION	1
+
+#define	DHD_IOCTL_MAXLEN	8192		/* max length ioctl buffer required */
+#define	DHD_IOCTL_SMLEN		256		/* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC				0
+#define DHD_GET_VERSION				1
+#define DHD_GET_VAR				2
+#define DHD_SET_VAR				3
+
+/* message levels */
+#define DHD_ERROR_VAL	0x0001
+#define DHD_TRACE_VAL	0x0002
+#define DHD_INFO_VAL	0x0004
+#define DHD_DATA_VAL	0x0008
+#define DHD_CTL_VAL	0x0010
+#define DHD_TIMER_VAL	0x0020
+#define DHD_HDRS_VAL	0x0040
+#define DHD_BYTES_VAL	0x0080
+#define DHD_INTR_VAL	0x0100
+#define DHD_LOG_VAL	0x0200
+#define DHD_GLOM_VAL	0x0400
+#define DHD_EVENT_VAL	0x0800
+#define DHD_BTA_VAL	0x1000
+#define DHD_ISCAN_VAL	0x2000
+#define DHD_ARPOE_VAL	0x4000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+	uint version;		/* To allow structure change tracking */
+	uint freq;		/* Max ticks between tx/rx attempts */
+	uint count;		/* Test packets to send/rcv each attempt */
+	uint print;		/* Print counts every <print> attempts */
+	uint total;		/* Total packets (or bursts) */
+	uint minlen;		/* Minimum length of packets to send */
+	uint maxlen;		/* Maximum length of packets to send */
+	uint numsent;		/* Count of test packets sent */
+	uint numrcvd;		/* Count of test packets received */
+	uint numfail;		/* Count of test send failures */
+	uint mode;		/* Test mode (type of test packets) */
+	uint stop;		/* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO		1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 	2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST	3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV		4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE	(-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE	0	/* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP   (-1)	/* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h
new file mode 100644
index 0000000..f8623f0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.32.4.1 2010-09-17 00:39:18 Exp $
+ *
+*/
+
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define	EPI_MAJOR_VERSION	5
+
+#define	EPI_MINOR_VERSION	90
+
+#define	EPI_RC_NUMBER		125
+
+#define	EPI_INCREMENTAL_NUMBER	33
+
+#define	EPI_BUILD_NUMBER	0
+
+#define	EPI_VERSION		5, 90, 125, 33
+
+#define	EPI_VERSION_NUM		0x055a7d21
+
+#define EPI_VERSION_DEV		5.90.125
+
+
+#define	EPI_VERSION_STR		"5.90.125.33"
+
+#endif /* _epivers_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h
new file mode 100644
index 0000000..51c51b9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -0,0 +1,34 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h,v 13.35.8.5 2011-02-11 00:56:32 Exp $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
new file mode 100644
index 0000000..8b9615c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HNDRTE arm trap handling.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_armtrap.h,v 13.4.14.1 2011-02-05 00:04:30 Exp $
+ */
+
+#ifndef	_hndrte_armtrap_h
+#define	_hndrte_armtrap_h
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define	TRAP_STRIDE	4
+#define FIRST_TRAP	TR_RST
+#define LAST_TRAP	(TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define	MAX_TRAP_TYPE	(TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define	MAX_TRAP_TYPE	(TR_ISR + ARMCM3_NUMINTS)
+#endif	/* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define	TR_TYPE		0x00
+#define	TR_EPC		0x04
+#define	TR_CPSR		0x08
+#define	TR_SPSR		0x0c
+#define	TR_REGS		0x10
+#define	TR_REG(n)	(TR_REGS + (n) * 4)
+#define	TR_SP		TR_REG(13)
+#define	TR_LR		TR_REG(14)
+#define	TR_PC		TR_REG(15)
+
+#define	TRAP_T_SIZE	80
+
+#ifndef	_LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+	uint32		type;
+	uint32		epc;
+	uint32		cpsr;
+	uint32		spsr;
+	uint32		r0;
+	uint32		r1;
+	uint32		r2;
+	uint32		r3;
+	uint32		r4;
+	uint32		r5;
+	uint32		r6;
+	uint32		r7;
+	uint32		r8;
+	uint32		r9;
+	uint32		r10;
+	uint32		r11;
+	uint32		r12;
+	uint32		r13;
+	uint32		r14;
+	uint32		pc;
+} trap_t;
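+
+/*
+ * Editor's note: the TR_* byte offsets above are intended to mirror the
+ * field layout of trap_t, and TRAP_T_SIZE (80) is 20 32-bit words.  A
+ * build-time sanity check could be written as:
+ *
+ *	typedef char trap_size_check[(sizeof(trap_t) == TRAP_T_SIZE) ? 1 : -1];
+ */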
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+#endif	/* _hndrte_armtrap_h */
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_cons.h b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
new file mode 100644
index 0000000..b9ede53
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
@@ -0,0 +1,68 @@
+/*
+ * Console support for hndrte.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_cons.h,v 13.4.10.4 2011-02-05 00:08:20 Exp $
+ */
+
+#ifndef	_HNDRTE_CONS_H
+#define	_HNDRTE_CONS_H
+
+#include <typedefs.h>
+
+#define CBUF_LEN	(128)
+
+#define LOG_BUF_LEN	1024
+
+typedef struct {
+	uint32		buf;		/* Can't be a pointer on (64-bit) hosts */
+	uint		buf_size;
+	uint		idx;
+	char		*_buf_compat;	/* redundant pointer for backward compat. */
+} hndrte_log_t;
+
+typedef struct {
+	/* Virtual UART
+	 *   When there is no UART (e.g. Quickturn), the host should write a complete
+	 *   input line directly into cbuf and then write the length into vcons_in.
+	 *   This may also be used when there is a real UART (at risk of conflicting with
+	 *   the real UART).  vcons_out is currently unused.
+	 */
+	volatile uint	vcons_in;
+	volatile uint	vcons_out;
+
+	/* Output (logging) buffer
+	 *   Console output is written to a ring buffer log_buf at index log_idx.
+	 *   The host may read the output when it sees log_idx advance.
+	 *   Output will be lost if the output wraps around faster than the host polls.
+	 */
+	hndrte_log_t	log;
+
+	/* Console input line buffer
+	 *   Characters are read one at a time into cbuf until <CR> is received, then
+	 *   the buffer is processed as a command line.  Also used for virtual UART.
+	 */
+	uint		cbuf_idx;
+	char		cbuf[CBUF_LEN];
+} hndrte_cons_t;
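+
+/*
+ * Illustrative host-side sketch (editor's addition): as described above,
+ * the host follows the dongle console by watching log.idx and reading new
+ * bytes out of the ring buffer.  read_dongle_byte() is a hypothetical
+ * accessor for dongle memory at the given dongle address.
+ *
+ *	static uint last_idx;
+ *	static void poll_console(hndrte_cons_t *cons)
+ *	{
+ *		while (last_idx != cons->log.idx) {
+ *			putchar(read_dongle_byte(cons->log.buf + last_idx));
+ *			last_idx = (last_idx + 1) % cons->log.buf_size;
+ *		}
+ *	}
+ */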
+
+#endif /* _HNDRTE_CONS_H */
diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h
new file mode 100644
index 0000000..4e26121
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -0,0 +1,207 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h,v 13.11 2009-12-03 23:52:31 Exp $
+ */
+
+#ifndef	_HNDSOC_H
+#define	_HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE		0x00000000	/* Physical SDRAM */
+#define SI_PCI_MEM		0x08000000	/* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ		(64 * 1024 * 1024)
+#define SI_PCI_CFG		0x0c000000	/* Host Mode sb2pcitranslation1 (64 MB) */
+#define	SI_SDRAM_SWAPPED	0x10000000	/* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2		0x80000000	/* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE    	0x18000000	/* Enumeration space base */
+
+#define SI_WRAP_BASE    	0x18100000	/* Wrapper space base */
+#define SI_CORE_SIZE    	0x1000		/* each core gets 4Kbytes for registers */
+#define	SI_MAXCORES		16		/* Max cores (this is arbitrary, for software
+						 * convenience and could be changed if we
+						 * make any larger chips)
+						 */
+
+#define	SI_FASTRAM		0x19000000	/* On-chip RAM on chips that also have DDR */
+#define	SI_FASTRAM_SWAPPED	0x19800000
+
+#define	SI_FLASH2		0x1c000000	/* Flash Region 2 (region 1 shadowed here) */
+#define	SI_FLASH2_SZ		0x02000000	/* Size of Flash Region 2 */
+#define	SI_ARMCM3_ROM		0x1e000000	/* ARM Cortex-M3 ROM */
+#define	SI_FLASH1		0x1fc00000	/* MIPS Flash Region 1 */
+#define	SI_FLASH1_SZ		0x00400000	/* MIPS Size of Flash Region 1 */
+#define	SI_ARM7S_ROM		0x20000000	/* ARM7TDMI-S ROM */
+#define	SI_ARMCM3_SRAM2		0x60000000	/* ARM Cortex-M3 SRAM Region 2 */
+#define	SI_ARM7S_SRAM2		0x80000000	/* ARM7TDMI-S SRAM Region 2 */
+#define	SI_ARM_FLASH1		0xffff0000	/* ARM Flash Region 1 */
+#define	SI_ARM_FLASH1_SZ	0x00010000	/* ARM Size of Flash Region 1 */
+
+#define SI_PCI_DMA		0x40000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2		0x80000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ		0x40000000	/* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32		0x00000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), low 32 bits
+						 */
+#define SI_PCIE_DMA_H32		0x80000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+
+/* core codes */
+#define	NODEV_CORE_ID		0x700		/* Invalid coreid */
+#define	CC_CORE_ID		0x800		/* chipcommon core */
+#define	ILINE20_CORE_ID		0x801		/* iline20 core */
+#define	SRAM_CORE_ID		0x802		/* sram core */
+#define	SDRAM_CORE_ID		0x803		/* sdram core */
+#define	PCI_CORE_ID		0x804		/* pci core */
+#define	MIPS_CORE_ID		0x805		/* mips core */
+#define	ENET_CORE_ID		0x806		/* enet mac core */
+#define	CODEC_CORE_ID		0x807		/* v90 codec core */
+#define	USB_CORE_ID		0x808		/* usb 1.1 host/device core */
+#define	ADSL_CORE_ID		0x809		/* ADSL core */
+#define	ILINE100_CORE_ID	0x80a		/* iline100 core */
+#define	IPSEC_CORE_ID		0x80b		/* ipsec core */
+#define	UTOPIA_CORE_ID		0x80c		/* utopia core */
+#define	PCMCIA_CORE_ID		0x80d		/* pcmcia core */
+#define	SOCRAM_CORE_ID		0x80e		/* internal memory core */
+#define	MEMC_CORE_ID		0x80f		/* memc sdram core */
+#define	OFDM_CORE_ID		0x810		/* OFDM phy core */
+#define	EXTIF_CORE_ID		0x811		/* external interface core */
+#define	D11_CORE_ID		0x812		/* 802.11 MAC core */
+#define	APHY_CORE_ID		0x813		/* 802.11a phy core */
+#define	BPHY_CORE_ID		0x814		/* 802.11b phy core */
+#define	GPHY_CORE_ID		0x815		/* 802.11g phy core */
+#define	MIPS33_CORE_ID		0x816		/* mips3302 core */
+#define	USB11H_CORE_ID		0x817		/* usb 1.1 host core */
+#define	USB11D_CORE_ID		0x818		/* usb 1.1 device core */
+#define	USB20H_CORE_ID		0x819		/* usb 2.0 host core */
+#define	USB20D_CORE_ID		0x81a		/* usb 2.0 device core */
+#define	SDIOH_CORE_ID		0x81b		/* sdio host core */
+#define	ROBO_CORE_ID		0x81c		/* roboswitch core */
+#define	ATA100_CORE_ID		0x81d		/* parallel ATA core */
+#define	SATAXOR_CORE_ID		0x81e		/* serial ATA & XOR DMA core */
+#define	GIGETH_CORE_ID		0x81f		/* gigabit ethernet core */
+#define	PCIE_CORE_ID		0x820		/* pci express core */
+#define	NPHY_CORE_ID		0x821		/* 802.11n 2x2 phy core */
+#define	SRAMC_CORE_ID		0x822		/* SRAM controller core */
+#define	MINIMAC_CORE_ID		0x823		/* MINI MAC/phy core */
+#define	ARM11_CORE_ID		0x824		/* ARM 1176 core */
+#define	ARM7S_CORE_ID		0x825		/* ARM7tdmi-s core */
+#define	LPPHY_CORE_ID		0x826		/* 802.11a/b/g phy core */
+#define	PMU_CORE_ID		0x827		/* PMU core */
+#define	SSNPHY_CORE_ID		0x828		/* 802.11n single-stream phy core */
+#define	SDIOD_CORE_ID		0x829		/* SDIO device core */
+#define	ARMCM3_CORE_ID		0x82a		/* ARM Cortex M3 core */
+#define	HTPHY_CORE_ID		0x82b		/* 802.11n 4x4 phy core */
+#define	MIPS74K_CORE_ID		0x82c		/* mips 74k core */
+#define	GMAC_CORE_ID		0x82d		/* Gigabit MAC core */
+#define	DMEMC_CORE_ID		0x82e		/* DDR1/2 memory controller core */
+#define	PCIERC_CORE_ID		0x82f		/* PCIE Root Complex core */
+#define	OCP_CORE_ID		0x830		/* OCP2OCP bridge core */
+#define	SC_CORE_ID		0x831		/* shared common core */
+#define	AHB_CORE_ID		0x832		/* OCP2AHB bridge core */
+#define	SPIH_CORE_ID		0x833		/* SPI host core */
+#define	I2S_CORE_ID		0x834		/* I2S core */
+#define	DMEMS_CORE_ID		0x835		/* SDR/DDR1 memory controller core */
+#define	DEF_SHIM_COMP		0x837		/* SHIM component in ubus/6362 */
+#define OOB_ROUTER_CORE_ID	0x367		/* OOB router core ID */
+#define	DEF_AI_COMP		0xfff		/* Default component, in ai chips it maps all
+						 * unused address ranges
+						 */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define	SI_CC_IDX		0
+
+/* SOC Interconnect types (aka chip types) */
+#define	SOCI_SB			0
+#define	SOCI_AI			1
+#define	SOCI_UBUS		2
+
+/* Common core control flags */
+#define	SICF_BIST_EN		0x8000
+#define	SICF_PME_EN		0x4000
+#define	SICF_CORE_BITS		0x3ffc
+#define	SICF_FGC		0x0002
+#define	SICF_CLOCK_EN		0x0001
+
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST		0x1e0		/* clock control and status */
+
+/* clk_ctl_st register */
+#define	CCS_FORCEALP		0x00000001	/* force ALP request */
+#define	CCS_FORCEHT		0x00000002	/* force HT request */
+#define	CCS_FORCEILP		0x00000004	/* force ILP request */
+#define	CCS_ALPAREQ		0x00000008	/* ALP Avail Request */
+#define	CCS_HTAREQ		0x00000010	/* HT Avail Request */
+#define	CCS_FORCEHWREQOFF	0x00000020	/* Force HW Clock Request Off */
+#define CCS_ERSRC_REQ_MASK	0x00000700	/* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT	8
+#define	CCS_ALPAVAIL		0x00010000	/* ALP is available */
+#define	CCS_HTAVAIL		0x00020000	/* HT is available */
+#define CCS_BP_ON_APL		0x00040000	/* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT		0x00080000	/* RO: Backplane is running on HT clock */
+#define CCS_ERSRC_STS_MASK	0x07000000	/* external resource status */
+#define CCS_ERSRC_STS_SHIFT	24
+
+#define	CCS0_HTAVAIL		0x00010000	/* HT avail in chipc and pcmcia on 4328a0 */
+#define	CCS0_ALPAVAIL		0x00020000	/* ALP avail in chipc and pcmcia on 4328a0 */
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size  */
+#define	BISZ_OFFSET		0x3e0		/* At this offset into the binary */
+#define	BISZ_MAGIC		0x4249535a	/* Marked with this value: 'BISZ' */
+#define	BISZ_MAGIC_IDX		0		/* Word 0: magic */
+#define	BISZ_TXTST_IDX		1		/*	1: text start */
+#define	BISZ_TXTEND_IDX		2		/*	2: text end */
+#define	BISZ_DATAST_IDX		3		/*	3: data start */
+#define	BISZ_DATAEND_IDX	4		/*	4: data end */
+#define	BISZ_BSSST_IDX		5		/*	5: bss start */
+#define	BISZ_BSSEND_IDX		6		/*	6: bss end */
+#define BISZ_SIZE		7		/* descriptor size in 32-bit integers */
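+
+/*
+ * Illustrative usage sketch (editor's addition): locating the embedded
+ * size descriptor in a firmware image.  'image' is a hypothetical pointer
+ * to the start of the binary in host memory.
+ *
+ *	const uint32 *bisz = (const uint32 *)((const char *)image + BISZ_OFFSET);
+ *	if (bisz[BISZ_MAGIC_IDX] == BISZ_MAGIC) {
+ *		uint32 text = bisz[BISZ_TXTEND_IDX] - bisz[BISZ_TXTST_IDX];
+ *		uint32 data = bisz[BISZ_DATAEND_IDX] - bisz[BISZ_DATAST_IDX];
+ *		uint32 bss  = bisz[BISZ_BSSEND_IDX] - bisz[BISZ_BSSST_IDX];
+ *	}
+ */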
+
+#endif /* _HNDSOC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/htsf.h b/drivers/net/wireless/bcmdhd/include/htsf.h
new file mode 100644
index 0000000..379fbbe
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/htsf.h
@@ -0,0 +1,74 @@
+/*
+ * Time stamps for latency measurements 
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: htsf.h,v 1.1.2.4 2011-01-21 08:27:03 Exp $
+ */
+#ifndef _HTSF_H_
+#define _HTSF_H_
+
+#define HTSFMAGIC       	0xCDCDABAB  /* in network order for tcpdump  */
+#define HTSFENDMAGIC    	0xEFEFABAB  /* to distinguish from RT2 magic */
+#define HTSF_HOSTOFFSET		102
+#define HTSF_DNGLOFFSET		(HTSF_HOSTOFFSET - 4)
+#define HTSF_DNGLOFFSET2	(HTSF_HOSTOFFSET + 106)
+#define HTSF_MIN_PKTLEN 	200
+#define ETHER_TYPE_BRCM_PKTDLYSTATS     0x886d
+
+typedef enum htsfts_type {
+	T10,
+	T20,
+	T30,
+	T40,
+	T50,
+	T60,
+	T70,
+	T80,
+	T90,
+	TA0,
+	TE0
+} htsf_timestamp_t;
+
+typedef struct {
+	uint32 magic;
+	uint32 prio;
+	uint32 seqnum;
+	uint32 misc;
+	uint32 c10;
+	uint32 t10;
+	uint32 c20;
+	uint32 t20;
+	uint32 t30;
+	uint32 t40;
+	uint32 t50;
+	uint32 t60;
+	uint32 t70;
+	uint32 t80;
+	uint32 t90;
+	uint32 cA0;
+	uint32 tA0;
+	uint32 cE0;
+	uint32 tE0;
+	uint32 endmagic;
+} htsfts_t;
+
+#endif /* _HTSF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h
new file mode 100644
index 0000000..1ec136e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -0,0 +1,431 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h,v 13.158.6.3 2010-12-22 23:47:26 Exp $
+ */
+
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+
+
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+
+
+#ifdef BCMDRIVER
+
+
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+extern void osl_detach(osl_t *osh);
+
+
+extern uint32 g_assert_type;
+
+
+#if defined(BCMASSERT_LOG)
+	#define ASSERT(exp) \
+	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(char *exp, char *file, int line);
+#else
+	#ifdef __GNUC__
+		#define GCC_VERSION \
+			(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+		#if GCC_VERSION > 30100
+			#define ASSERT(exp)	do {} while (0)
+		#else
+			
+			#define ASSERT(exp)
+		#endif 
+	#endif 
+#endif 
+
+
+#define	OSL_DELAY(usec)		osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define	OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define	OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+
+#define	OSL_PCI_READ_CONFIG(osh, offset, size) \
+	osl_pci_read_config((osh), (offset), (size))
+#define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+	osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+
+#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+
+
+typedef struct {
+	bool pkttag;
+	uint pktalloced;	/* Number of packets currently allocated */
+	bool mmbus;		/* Bus supports memory-mapped register accesses */
+	pktfree_cb_fn_t tx_fn;	/* Callback invoked when a tx packet is freed */
+	void *tx_ctx;		/* Context passed back to tx_fn */
+} osl_pubinfo_t;
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx)		\
+	do {						\
+	   ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn;	\
+	   ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx;	\
+	} while (0)
+
+
+
+#define BUS_SWAP32(v)		(v)
+
+	#define MALLOC(osh, size)	osl_malloc((osh), (size))
+	#define MFREE(osh, addr, size)	osl_mfree((osh), (addr), (size))
+	#define MALLOCED(osh)		osl_malloced((osh))
+	extern void *osl_malloc(osl_t *osh, uint size);
+	extern void osl_mfree(osl_t *osh, void *addr, uint size);
+	extern uint osl_malloced(osl_t *osh);
+
+#define NATIVE_MALLOC(osh, size)		kmalloc(size, GFP_ATOMIC)
+#define NATIVE_MFREE(osh, addr, size)	kfree(addr)
+
+#define	MALLOC_FAILED(osh)	osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+
+#define	DMA_CONSISTENT_ALIGN	osl_dma_consistent_align()
+#define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, ulong *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa);
+
+
+#define	DMA_TX	1	
+#define	DMA_RX	2	
+
+
+#define	DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction))
+#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+	osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+
+#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
+
+
+	#include <bcmsdh.h>
+	#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v)))
+	#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r))))
+
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+		mmap_op else bus_op
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+		mmap_op : bus_op
+
+#define OSL_ERROR(bcmerror)	osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+
+#define	PKTBUFSZ	2048   
+
+
+
+#define OSL_SYSUPTIME()		((uint32)jiffies * (1000 / HZ))
+#define	printf(fmt, args...)	printk(fmt , ## args)
+#include <linux/kernel.h>	
+#include <linux/string.h>	
+
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+
+
+#ifndef __mips__
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(uint8) ? readb((volatile uint8*)(r)) : \
+	sizeof(*(r)) == sizeof(uint16) ? readw((volatile uint16*)(r)) : \
+	readl((volatile uint32*)(r)), OSL_READ_REG(osh, r)) \
+)
+#else 
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			__asm__ __volatile__("sync"); \
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)(r)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)(r)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+			} \
+			__asm__ __volatile__("sync"); \
+			__osl_v; \
+		}), \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			__asm__ __volatile__("sync"); \
+			__osl_v = OSL_READ_REG(osh, r); \
+			__asm__ __volatile__("sync"); \
+			__osl_v; \
+		})) \
+)
+#endif 
+
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh,  \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+
+
+#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
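+
+/*
+ * Illustrative usage sketch (editor's addition): R_REG/W_REG choose the
+ * access width from the register pointer's type and route the access via
+ * memory-mapped I/O or the SDIO helpers depending on osh->mmbus.  'regs'
+ * is a hypothetical mapped register block with a uint32 'control' field.
+ *
+ *	uint32 val = R_REG(osh, &regs->control);
+ *	W_REG(osh, &regs->control, val | 0x1);
+ *	OR_REG(osh, &regs->control, 0x2);	// read-modify-write shorthand
+ */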
+
+
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+
+#ifdef __mips__
+#include <asm/addrspace.h>
+#define OSL_UNCACHED(va)	((void *)KSEG1ADDR((va)))
+#define OSL_CACHED(va)		((void *)KSEG0ADDR((va)))
+#else
+#define OSL_UNCACHED(va)	((void *)va)
+#define OSL_CACHED(va)		((void *)va)
+#endif 
+
+
+#if defined(__i386__)
+#define	OSL_GETCYCLES(x)	rdtscl((x))
+#else
+#define OSL_GETCYCLES(x)	((x) = 0)
+#endif 
+
+
+#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })
+
+
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define	REG_MAP(pa, size)	ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size)       (void *)(0)
+#endif 
+#define	REG_UNMAP(va)		iounmap((va))
+
+
+#define	R_SM(r)			*(r)
+#define	W_SM(r, v)		(*(r) = (v))
+#define	BZERO_SM(r, len)	memset((r), '\0', (len))
+
+
+#include <linuxver.h>		
+
+
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb))
+#define PKTLIST_DUMP(osh, buf)
+#define PKTDBG_TRACE(osh, pkt, bit)
+#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
+#ifdef DHD_USE_STATIC_BUF
+#define	PKTGET_STATIC(osh, len, send)		osl_pktget_static((osh), (len))
+#define	PKTFREE_STATIC(osh, skb, send)		osl_pktfree_static((osh), (skb), (send))
+#endif
+#define	PKTDATA(osh, skb)		(((struct sk_buff*)(skb))->data)
+#define	PKTLEN(osh, skb)		(((struct sk_buff*)(skb))->len)
+#define PKTHEADROOM(osh, skb)		(PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail))
+#define	PKTNEXT(osh, skb)		(((struct sk_buff*)(skb))->next)
+#define	PKTSETNEXT(osh, skb, x)		(((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
+#define	PKTSETLEN(osh, skb, len)	__skb_trim((struct sk_buff*)(skb), (len))
+#define	PKTPUSH(osh, skb, bytes)	skb_push((struct sk_buff*)(skb), (bytes))
+#define	PKTPULL(osh, skb, bytes)	skb_pull((struct sk_buff*)(skb), (bytes))
+#define	PKTTAG(skb)			((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTALLOCED(osh)			((osl_pubinfo_t *)(osh))->pktalloced
+#define PKTSETPOOL(osh, skb, x, y)	do {} while (0)
+#define PKTPOOL(osh, skb)		FALSE
+#define PKTSHRINK(osh, m)		(m)
+
+#ifdef CTFPOOL
+#define	CTFPOOL_REFILL_THRESH	3
+typedef struct ctfpool {
+	void		*head;
+	spinlock_t	lock;
+	uint		max_obj;
+	uint		curr_obj;
+	uint		obj_size;
+	uint		refills;
+	uint		fast_allocs;
+	uint 		fast_frees;
+	uint 		slow_allocs;
+} ctfpool_t;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	FASTBUF	(1 << 4)
+#define	CTFBUF	(1 << 5)
+#define	PKTSETFAST(osh, skb)	((((struct sk_buff*)(skb))->mac_len) |= FASTBUF)
+#define	PKTCLRFAST(osh, skb)	((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF))
+#define	PKTSETCTF(osh, skb)	((((struct sk_buff*)(skb))->mac_len) |= CTFBUF)
+#define	PKTCLRCTF(osh, skb)	((((struct sk_buff*)(skb))->mac_len) &= (~CTFBUF))
+#define	PKTISFAST(osh, skb)	((((struct sk_buff*)(skb))->mac_len) & FASTBUF)
+#define	PKTISCTF(osh, skb)	((((struct sk_buff*)(skb))->mac_len) & CTFBUF)
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->mac_len)
+#else
+#define	FASTBUF	(1 << 0)
+#define	CTFBUF	(1 << 1)
+#define	PKTSETFAST(osh, skb)	((((struct sk_buff*)(skb))->__unused) |= FASTBUF)
+#define	PKTCLRFAST(osh, skb)	((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF))
+#define	PKTSETCTF(osh, skb)	((((struct sk_buff*)(skb))->__unused) |= CTFBUF)
+#define	PKTCLRCTF(osh, skb)	((((struct sk_buff*)(skb))->__unused) &= (~CTFBUF))
+#define	PKTISFAST(osh, skb)	((((struct sk_buff*)(skb))->__unused) & FASTBUF)
+#define	PKTISCTF(osh, skb)	((((struct sk_buff*)(skb))->__unused) & CTFBUF)
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->__unused)
+#endif 
+
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->sk)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#endif 
+
+#ifdef HNDCTF
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	SKIPCT	(1 << 6)
+#define	PKTSETSKIPCT(osh, skb)	(((struct sk_buff*)(skb))->mac_len |= SKIPCT)
+#define	PKTCLRSKIPCT(osh, skb)	(((struct sk_buff*)(skb))->mac_len &= (~SKIPCT))
+#define	PKTSKIPCT(osh, skb)	(((struct sk_buff*)(skb))->mac_len & SKIPCT)
+#else 
+#define	SKIPCT	(1 << 2)
+#define	PKTSETSKIPCT(osh, skb)	(((struct sk_buff*)(skb))->__unused |= SKIPCT)
+#define	PKTCLRSKIPCT(osh, skb)	(((struct sk_buff*)(skb))->__unused &= (~SKIPCT))
+#define	PKTSKIPCT(osh, skb)	(((struct sk_buff*)(skb))->__unused & SKIPCT)
+#endif 
+#else 
+#define	PKTSETSKIPCT(osh, skb)
+#define	PKTCLRSKIPCT(osh, skb)
+#define	PKTSKIPCT(osh, skb)
+#endif 
+
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+
+
+static INLINE void *
+osl_pkt_frmnative(osl_pubinfo_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+
+	if (osh->pkttag)
+		bzero((void*)((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ);
+
+	
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		osh->pktalloced++;
+	}
+
+	return (void *)pkt;
+}
+#define PKTFRMNATIVE(osh, skb)	osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb))
+
+
+static INLINE struct sk_buff *
+osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+
+	if (osh->pkttag)
+		bzero(((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ);
+
+	
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		osh->pktalloced--;
+	}
+
+	return (struct sk_buff *)pkt;
+}
+#define PKTTONATIVE(osh, pkt)		osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt))
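+
+/*
+ * Editor's note: packets crossing the Linux/OSL boundary are converted
+ * with the two helpers above so that the allocation count in
+ * osl_pubinfo_t stays balanced, e.g.:
+ *
+ *	void *pkt = PKTFRMNATIVE(osh, skb);	// sk_buff -> OSL packet, pktalloced++
+ *	// ... driver works on 'pkt' ...
+ *	skb = PKTTONATIVE(osh, pkt);		// OSL packet -> sk_buff, pktalloced--
+ */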
+
+#define	PKTLINK(skb)			(((struct sk_buff*)(skb))->prev)
+#define	PKTSETLINK(skb, x)		(((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define	PKTPRIO(skb)			(((struct sk_buff*)(skb))->priority)
+#define	PKTSETPRIO(skb, x)		(((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb)		(((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff*)(skb))->ip_summed = \
+						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+#define PKTSHARED(skb)                  (((struct sk_buff*)(skb))->cloned)
+
+
+
+#else 
+
+
+
+	#define ASSERT(exp)	do {} while (0)
+
+
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+
+#include <string.h>
+
+
+#include <stdio.h>
+
+
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif 
+
+#endif	/* _linux_osl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h
new file mode 100644
index 0000000..b64d0bb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -0,0 +1,592 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h,v 13.53.2.2 2010-12-22 23:47:26 Exp $
+ */
+
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif 
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif	
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif 
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif	
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
+#else
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
+typedef void (*work_func_t)(void *work);
+#endif	
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif	
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED	SA_SHIRQ
+#endif 
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef	CONFIG_NET_RADIO
+#define	CONFIG_WIRELESS_EXT
+#endif
+#endif	
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#ifndef SANDGATE2G
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif 
+#endif 
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif 
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#define __devinit	__init
+#endif
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x)	x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev)		(dev)->sysdata
+#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
+
+
+
+struct pci_device_id {
+	unsigned int vendor, device;		
+	unsigned int subvendor, subdevice;	
+	unsigned int class, class_mask;		
+	unsigned long driver_data;		
+};
+
+struct pci_driver {
+	struct list_head node;
+	char *name;
+	const struct pci_device_id *id_table;	
+	int (*probe)(struct pci_dev *dev,
+	             const struct pci_device_id *id); 
+	void (*remove)(struct pci_dev *dev);	
+	void (*suspend)(struct pci_dev *dev);	
+	void (*resume)(struct pci_dev *dev);	
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif 
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x)	__initcall(x);
+#define module_exit(x)	__exitcall(x);
+#endif
+#endif	
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL_INPUT)
+#define WL_CONFIG_RFKILL_INPUT
+#else
+#undef WL_CONFIG_RFKILL_INPUT
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+
+
+#ifndef PCI_DMA_TODEVICE
+#define	PCI_DMA_TODEVICE	1
+#define	PCI_DMA_FROMDEVICE	2
+#endif
+
+typedef u32 dma_addr_t;
+
+
+static inline int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                                         dma_addr_t *dma_handle)
+{
+	void *ret;
+	int gfp = GFP_ATOMIC | GFP_DMA;
+
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_bus(ret);
+	}
+	return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                                       void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif 
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
+#define netif_down(dev)			do { (dev)->start = 0; } while (0)
+
+
+#ifndef _COMPAT_NETDEVICE_H
+
+
+
+#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+}
+
+#define netif_queue_stopped(dev)	(dev)->tbusy
+#define netif_running(dev)		(dev)->start
+
+#endif 
+
+#define netif_device_attach(dev)	netif_start_queue(dev)
+#define netif_device_detach(dev)	netif_stop_queue(dev)
+
+
+#define tasklet_struct				tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+	queue_task(tasklet, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+                                void (*func)(unsigned long),
+                                unsigned long data)
+{
+	tasklet->next = NULL;
+	tasklet->sync = 0;
+	tasklet->routine = (void (*)(void *))func;
+	tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet)	{ do {} while (0); }
+
+
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif 
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+
+#define PREPARE_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		(_tq)->routine = _routine;			\
+		(_tq)->data = _data;				\
+	} while (0)
+
+
+#define INIT_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		INIT_LIST_HEAD(&(_tq)->list);			\
+		(_tq)->sync = 0;				\
+		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
+	} while (0)
+
+#endif	
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
+#else
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_read_config_dword(dev, i * 4, &buffer[i]);
+	}
+	return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_write_config_dword(dev, i * 4, buffer[i]);
+	} else {
+		for (i = 0; i < 6; i++)
+			pci_write_config_dword(dev,
+			                       PCI_BASE_ADDRESS_0 + (i * 4),
+			                       pci_resource_start(dev, i));
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	}
+	return 0;
+}
+#endif 
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT		do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
+#endif
+#else 
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT			do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT			do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#endif 
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev)		kfree(dev)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#define af_packet_priv			data
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW	CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+	void	*parent;	/* owner handle passed to PROC_START */
+	struct	task_struct *p_task;
+	long	thr_pid;	/* pid returned by kernel_thread() */
+	int	prio;		/* thread priority */
+	struct	semaphore sema;	/* wakes the thread when there is work */
+	bool	terminated;	/* set by PROC_STOP to ask the thread to exit */
+	struct	completion completed;	/* start-up and shutdown handshake */
+} tsk_ctl_t;
+
+
+
+
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags) \
+{ \
+	sema_init(&((tsk_ctl)->sema), 0); \
+	init_completion(&((tsk_ctl)->completed)); \
+	(tsk_ctl)->parent = owner; \
+	(tsk_ctl)->terminated = FALSE; \
+	(tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \
+	if ((tsk_ctl)->thr_pid > 0) \
+		wait_for_completion(&((tsk_ctl)->completed)); \
+	DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+	(tsk_ctl)->terminated = TRUE; \
+	smp_wmb(); \
+	up(&((tsk_ctl)->sema));	\
+	wait_for_completion(&((tsk_ctl)->completed)); \
+	DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+	(tsk_ctl)->thr_pid = -1; \
+}
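+
+/*
+ * Illustrative usage sketch (editor's addition): a thread started with
+ * PROC_START is expected to signal 'completed' once it is running, wait
+ * on 'sema' for work, and exit when 'terminated' is set by PROC_STOP.
+ *
+ *	static int worker_thread(void *data)
+ *	{
+ *		tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ *		complete(&tsk->completed);		// unblocks PROC_START
+ *		while (down_interruptible(&tsk->sema) == 0) {
+ *			if (tsk->terminated)
+ *				break;
+ *			// do one unit of work
+ *		}
+ *		complete_and_exit(&tsk->completed, 0);	// unblocks PROC_STOP
+ *	}
+ */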
+
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid;    \
+pid = find_get_pid((pid_t)nr);    \
+tsk = pid_task(pid, PIDTYPE_PID);    \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+	struct task_struct *tsk; \
+	tsk = find_task_by_vpid(pid); \
+	if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+	kill_proc(pid, sig, 1); \
+}
+#endif
+#endif 
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret)		\
+do {									\
+	wait_queue_t __wait;						\
+	init_waitqueue_entry(&__wait, current);				\
+									\
+	add_wait_queue(&wq, &__wait);					\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			ret = schedule_timeout(ret);			\
+			if (!ret)					\
+				break;					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	current->state = TASK_RUNNING;					\
+	remove_wait_queue(&wq, &__wait);				\
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!(condition))						\
+		__wait_event_interruptible_timeout(wq, condition, __ret); \
+	__ret;								\
+})
+
+#endif 
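+
+/*
+ * Illustrative usage (editor's addition): the compatibility macro above
+ * mirrors the modern wait_event_interruptible_timeout() semantics, e.g.
+ * waiting up to one second for a flag set from interrupt context:
+ *
+ *	long left = wait_event_interruptible_timeout(wq, done_flag != 0, HZ);
+ *	// left == 0 on timeout, < 0 if interrupted, otherwise jiffies remaining
+ */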
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#define WL_DEV_IF(dev)          ((wl_if_t*)netdev_priv(dev))
+#else
+#define WL_DEV_IF(dev)          ((wl_if_t*)(dev)->priv)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p)         wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
+#endif  
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif 
+
+#endif /* _linuxver_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h
new file mode 100644
index 0000000..f468420
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h,v 1.3 2009-01-15 00:06:54 Exp $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max options */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;		/* name for prompt in error strings */
+	const char* flags;		/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int  val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif  /* MINI_OPT_H  */
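
A usage sketch for the interface above (not part of the patch): the stop
condition on miniopt()'s return value and the include paths are assumptions;
only the struct fields documented in this header are relied on, and "mytool",
'h' and 'v' are hypothetical names.

	#include <stdio.h>

	#include <typedefs.h>
	#include <miniopt.h>

	/* argv is expected to point at the first argument, not the program name */
	static void parse_args(char **argv)
	{
		miniopt_t mo;

		/* "mytool" is used in error prompts; 'h' and 'v' take no argument */
		miniopt_init(&mo, "mytool", "hv", FALSE);

		while (*argv != NULL) {
			if (miniopt(&mo, argv) != 0)
				break;	/* assumed: nonzero means error or end of options */

			if (mo.positional)
				printf("positional arg: %s\n", mo.valstr);
			else if (mo.good_int)
				printf("-%c = %d\n", mo.opt, mo.val);
			else
				printf("-%c = %s\n", mo.opt,
				       mo.valstr ? mo.valstr : "(flag, no value)");

			argv += mo.consumed;	/* skip the entries miniopt() consumed */
		}
	}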
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 0000000..721d421
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,74 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h,v 1.4 2009-04-10 04:15:32 Exp $
+ */
+
+#ifndef	_MSGTRACE_H
+#define	_MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8	version;
+	uint8   spare;
+	uint16	len;	/* Len of the trace */
+	uint32	seqnum;	/* Sequence number of message. Useful if the message has been lost
+			 * because of DMA error or a bus reset (ex: SDIO Func2)
+			 */
+	uint32  discarded_bytes;  /* Number of discarded bytes because of trace overflow  */
+	uint32  discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN 	sizeof(msgtrace_hdr_t)
+
+/* The hbus driver itself generates traces when it sends a trace message, which would cause an
+ * endless stream of traces.  This flag must be set to TRUE around any hbus trace; it is reset in
+ * msgtrace_put().  This prevents endless traces at the cost of occasionally losing traces, but
+ * only in bus device code.  It is recommended to set this flag in the SD_TRACE macro but not in
+ * SD_ERROR, so that hbus error traces are not missed; an hbus error trace should not generate
+ * endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+                                     uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern void msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* _MSGTRACE_H */
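
A minimal sketch of wiring a send callback into the trace module using the
declarations above (not part of the patch; the handle names and the empty
callback body are hypothetical):

	#include <typedefs.h>
	#include <msgtrace.h>

	/* Matches msgtrace_func_send_t: forward 'hdr' (hdrlen bytes) followed by
	 * 'buf' (buflen bytes) to the host over the bus.  hdl1/hdl2 are the opaque
	 * handles that were passed to msgtrace_init().
	 */
	static void my_trace_send(void *hdl1, void *hdl2, uint8 *hdr,
	                          uint16 hdrlen, uint8 *buf, uint16 buflen)
	{
		/* bus-specific transmit omitted in this sketch */
	}

	void my_trace_setup(void *bus_handle)
	{
		msgtrace_init(bus_handle, NULL, my_trace_send);
		msgtrace_start();
	}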
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 0000000..80248ee
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,66 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: osl.h,v 13.44.96.1 2010-05-20 11:09:18 Exp $
+ */
+
+
+#ifndef _osl_h_
+#define _osl_h_
+
+
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ	32 
+
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+
+#include <linux_osl.h>
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)
+#endif
+
+
+
+#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif   
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif   
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif 
+
+#endif	
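
For illustration (not part of the patch), SET_REG() above is a read-modify-write
helper built from R_REG()/W_REG(); the register pointer and the CTRL_* constants
in this sketch are hypothetical:

	#include <typedefs.h>
	#include <osl.h>

	#define CTRL_SPEED_MASK		0x00000030	/* hypothetical 2-bit field */
	#define CTRL_SPEED_FAST		0x00000020	/* hypothetical field value */

	static void set_speed_fast(osl_t *osh, volatile uint32 *ctrlreg)
	{
		/* clear the masked field, then OR in the new value */
		SET_REG(osh, ctrlreg, CTRL_SPEED_MASK, CTRL_SPEED_FAST);

		/* equivalent to:
		 * W_REG(osh, ctrlreg,
		 *       (R_REG(osh, ctrlreg) & ~CTRL_SPEED_MASK) | CTRL_SPEED_FAST);
		 */
	}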
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 0000000..5d4a876
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,54 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h,v 1.4 2008-12-09 23:43:22 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+	#undef BWL_PACKED_SECTION
+#else
+	#error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+
+#undef	BWL_PRE_PACKED_STRUCT
+#undef	BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 0000000..da2fed6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,61 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h,v 1.4.124.1 2010-09-17 00:47:03 Exp $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+	#error "BWL_PACKED_SECTION is already defined!"
+#else
+	#define BWL_PACKED_SECTION
+#endif
+
+
+
+
+
+#if defined(__GNUC__)
+	#define	BWL_PRE_PACKED_STRUCT
+	#define	BWL_POST_PACKED_STRUCT	__attribute__ ((packed))
+#elif defined(__CC_ARM)
+	#define	BWL_PRE_PACKED_STRUCT	__packed
+	#define	BWL_POST_PACKED_STRUCT
+#else
+	#error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 0000000..fae063a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,52 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h,v 1.50 2009-12-07 21:56:06 Exp $
+ */
+
+
+#ifndef	_h_pcicfg_
+#define	_h_pcicfg_
+
+
+#define	PCI_CFG_VID		0
+#define	PCI_CFG_CMD		4
+#define	PCI_CFG_REV		8
+#define	PCI_CFG_BAR0		0x10
+#define	PCI_CFG_BAR1		0x14
+#define	PCI_BAR0_WIN		0x80	
+#define	PCI_INT_STATUS		0x90	
+#define	PCI_INT_MASK		0x94	
+
+#define PCIE_EXTCFG_OFFSET	0x100
+#define	PCI_BAR0_PCIREGS_OFFSET	(6 * 1024)	
+#define	PCI_BAR0_PCISBR_OFFSET	(4 * 1024)	
+
+#define PCI_BAR0_WINSZ		(16 * 1024)	
+
+
+#define	PCI_16KB0_PCIREGS_OFFSET (8 * 1024)	
+#define	PCI_16KB0_CCREGS_OFFSET	(12 * 1024)	
+#define PCI_16KBB0_WINSZ	(16 * 1024)	
+
+#endif	
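
A usage sketch for the offsets above (not part of the patch): reading the
vendor/device ID word at PCI_CFG_VID with the standard Linux config-space
accessors; 'pdev' is a hypothetical, already-probed PCI device.

	#include <linux/kernel.h>
	#include <linux/pci.h>

	#include <pcicfg.h>

	static void print_pci_ids(struct pci_dev *pdev)
	{
		u32 vid_did;

		/* offset 0 holds the vendor ID in the low 16 bits and the
		 * device ID in the high 16 bits
		 */
		pci_read_config_dword(pdev, PCI_CFG_VID, &vid_did);
		printk(KERN_INFO "vendor 0x%04x device 0x%04x\n",
		       vid_did & 0xffff, vid_did >> 16);
	}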
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
new file mode 100644
index 0000000..2342cb3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
@@ -0,0 +1,1731 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h,v 9.260.2.6 2010-12-15 21:41:14 Exp $
+ */
+
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US          1024    
+
+
+#define DOT11_A3_HDR_LEN        24  
+#define DOT11_A4_HDR_LEN        30  
+#define DOT11_MAC_HDR_LEN       DOT11_A3_HDR_LEN    
+#define DOT11_FCS_LEN           4   
+#define DOT11_ICV_LEN           4   
+#define DOT11_ICV_AES_LEN       8   
+#define DOT11_QOS_LEN           2   
+#define DOT11_HTC_LEN           4   
+
+#define DOT11_KEY_INDEX_SHIFT       6   
+#define DOT11_IV_LEN            4   
+#define DOT11_IV_TKIP_LEN       8   
+#define DOT11_IV_AES_OCB_LEN        4   
+#define DOT11_IV_AES_CCM_LEN        8   
+#define DOT11_IV_MAX_LEN        8   
+
+
+#define DOT11_MAX_MPDU_BODY_LEN     2304    
+
+#define DOT11_MAX_MPDU_LEN      (DOT11_A4_HDR_LEN + \
+					 DOT11_QOS_LEN + \
+					 DOT11_IV_AES_CCM_LEN + \
+					 DOT11_MAX_MPDU_BODY_LEN + \
+					 DOT11_ICV_LEN + \
+					 DOT11_FCS_LEN) 
+
+#define DOT11_MAX_SSID_LEN      32  
+
+
+#define DOT11_DEFAULT_RTS_LEN       2347    
+#define DOT11_MAX_RTS_LEN       2347    
+
+
+#define DOT11_MIN_FRAG_LEN      256 
+#define DOT11_MAX_FRAG_LEN      2346    
+#define DOT11_DEFAULT_FRAG_LEN      2346    
+
+
+#define DOT11_MIN_BEACON_PERIOD     1   
+#define DOT11_MAX_BEACON_PERIOD     0xFFFF  
+
+
+#define DOT11_MIN_DTIM_PERIOD       1   
+#define DOT11_MAX_DTIM_PERIOD       0xFF    
+
+
+#define DOT11_LLC_SNAP_HDR_LEN      8   
+#define DOT11_OUI_LEN           3   
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+	uint8   dsap;               
+	uint8   ssap;               
+	uint8   ctl;                
+	uint8   oui[DOT11_OUI_LEN];     
+	uint16  type;               
+} BWL_POST_PACKED_STRUCT;
+
+
+#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)    
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   a1;     
+	struct ether_addr   a2;     
+	struct ether_addr   a3;     
+	uint16          seq;        
+	struct ether_addr   a4;     
+} BWL_POST_PACKED_STRUCT;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   ra;     
+	struct ether_addr   ta;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_RTS_LEN       16      
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   ra;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTS_LEN       10      
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   ra;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACK_LEN       10      
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   bssid;      
+	struct ether_addr   ta;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_PS_POLL_LEN   16      
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   ra;     
+	struct ether_addr   bssid;      
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CS_END_LEN    16      
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+	uint8   category;
+	uint8   OUI[3];
+	uint8   type;
+	uint8   subtype;
+	uint8   data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+	uint8   category;
+	uint8   OUI[3];
+	uint8   type;
+	uint8   subtype;
+	uint8   data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+#define DOT11_ACTION_VS_HDR_LEN 6
+
+#define BCM_ACTION_OUI_BYTE0    0x00
+#define BCM_ACTION_OUI_BYTE1    0x90
+#define BCM_ACTION_OUI_BYTE2    0x4c
+
+
+#define DOT11_BA_CTL_POLICY_NORMAL  0x0000  
+#define DOT11_BA_CTL_POLICY_NOACK   0x0001  
+#define DOT11_BA_CTL_POLICY_MASK    0x0001  
+
+#define DOT11_BA_CTL_MTID       0x0002  
+#define DOT11_BA_CTL_COMPRESSED     0x0004  
+
+#define DOT11_BA_CTL_NUMMSDU_MASK   0x0FC0  
+#define DOT11_BA_CTL_NUMMSDU_SHIFT  6   
+
+#define DOT11_BA_CTL_TID_MASK       0xF000  
+#define DOT11_BA_CTL_TID_SHIFT      12  
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   ra;     
+	struct ether_addr   ta;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN   16      
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+	uint16          bar_control;    
+	uint16          seqnum;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN       4       
+
+#define DOT11_BA_BITMAP_LEN 128     
+#define DOT11_BA_CMP_BITMAP_LEN 8       
+
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+	uint16          ba_control; 
+	uint16          seqnum;     
+	uint8           bitmap[DOT11_BA_BITMAP_LEN];    
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN        4       
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+	uint16          fc;     
+	uint16          durid;      
+	struct ether_addr   da;     
+	struct ether_addr   sa;     
+	struct ether_addr   bssid;      
+	uint16          seq;        
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_HDR_LEN  24      
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+	uint32          timestamp[2];
+	uint16          beacon_interval;
+	uint16          capability;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BCN_PRB_LEN   12      
+#define DOT11_BCN_PRB_FIXED_LEN 12      
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+	uint16          alg;        
+	uint16          seq;        
+	uint16          status;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN    6       
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+	uint16          capability; 
+	uint16          listen;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN   4   
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+	uint16          capability; 
+	uint16          listen;     
+	struct ether_addr   ap;     
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN 10  
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+	uint16          capability; 
+	uint16          status;     
+	uint16          aid;        
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN  6   
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+	uint8   category;
+	uint8   action;
+	uint8   token;
+	uint8   data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN    3   
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+	uint8   category;
+	uint8   action;
+	uint8   ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+	uint8   category;
+	uint8   action;
+	uint8   control;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE   1
+#define SM_PWRSAVE_MODE     2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+	uint8 id;
+	uint8 len;
+	uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+	uint8 min;
+	uint8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+	uint8 id;
+	uint8 len;
+	uint8 tx_pwr;
+	uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN 2   
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+	uint8 id;
+	uint8 len;
+	uint8 first_channel;
+	uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+	uint8   id;     
+	uint8   len;        
+	uint8   extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+	uint8   id;     
+	uint8   len;        
+	uint8   oui[3];     
+	uint8   type;           
+	uint8   extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN   5
+#define BRCM_EXTCH_IE_TYPE  53  
+#define DOT11_EXTCH_IE_LEN  1
+#define DOT11_EXT_CH_MASK   0x03    
+#define DOT11_EXT_CH_UPPER  0x01    
+#define DOT11_EXT_CH_LOWER  0x03    
+#define DOT11_EXT_CH_NONE   0x00    
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+	uint8   category;
+	uint8   action;
+	uint8   data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+	uint8 id;   
+	uint8 len;  
+	uint8 mode; 
+	uint8 channel;  
+	uint8 count;    
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN 3   
+
+#define DOT11_CSA_MODE_ADVISORY     0   
+#define DOT11_CSA_MODE_NO_TX        1   
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+	uint8   category;
+	uint8   action;
+	dot11_chan_switch_ie_t chan_switch_ie;  
+	dot11_brcm_extch_ie_t extch_ie;     
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+	uint8 mode; 
+	uint8 reg;  
+	uint8 channel;  
+	uint8 count;    
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+	uint8 id;   
+	uint8 len;  
+	struct dot11_csa_body b;    
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN    4   
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+	uint8   category;
+	uint8   action;
+	dot11_ext_csa_ie_t chan_switch_ie;  
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+	uint8   category;
+	uint8   action;
+	struct dot11_csa_body b;    
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+	uint8   id;
+	uint8   len;
+	uint8   info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN 1   
+
+#define DOT11_OBSS_COEX_INFO_REQ        0x01
+#define DOT11_OBSS_COEX_40MHZ_INTOLERANT    0x02
+#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+	uint8   id;
+	uint8   len;
+	uint8   regclass;
+	uint8   chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN   1   
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+	uint8 id;
+	uint8 len;
+	uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+#define DOT11_EXTCAP_LEN    1
+
+
+
+#define DOT11_MEASURE_TYPE_BASIC    0   
+#define DOT11_MEASURE_TYPE_CCA      1   
+#define DOT11_MEASURE_TYPE_RPI      2   
+#define DOT11_MEASURE_TYPE_CHLOAD       3   
+#define DOT11_MEASURE_TYPE_NOISE        4   
+#define DOT11_MEASURE_TYPE_BEACON       5   
+#define DOT11_MEASURE_TYPE_FRAME    6   
+#define DOT11_MEASURE_TYPE_STATS        7   
+#define DOT11_MEASURE_TYPE_LCI      8   
+#define DOT11_MEASURE_TYPE_TXSTREAM     9   
+#define DOT11_MEASURE_TYPE_PAUSE        255 
+
+
+#define DOT11_MEASURE_MODE_PARALLEL     (1<<0)  
+#define DOT11_MEASURE_MODE_ENABLE   (1<<1)  
+#define DOT11_MEASURE_MODE_REQUEST  (1<<2)  
+#define DOT11_MEASURE_MODE_REPORT   (1<<3)  
+#define DOT11_MEASURE_MODE_DUR  (1<<4)  
+
+#define DOT11_MEASURE_MODE_LATE     (1<<0)  
+#define DOT11_MEASURE_MODE_INCAPABLE    (1<<1)  
+#define DOT11_MEASURE_MODE_REFUSED  (1<<2)  
+
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) 
+#define DOT11_MEASURE_BASIC_MAP_OFDM    ((uint8)(1<<1)) 
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN  ((uint8)(1<<2)) 
+#define DOT11_MEASURE_BASIC_MAP_RADAR   ((uint8)(1<<3)) 
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS  ((uint8)(1<<4)) 
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14    
+
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3   
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	BWL_PRE_PACKED_STRUCT union
+	{
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 channel;
+			uint8 start_time[8];
+			uint16 duration;
+			uint8 map;
+		} BWL_POST_PACKED_STRUCT basic;
+		uint8 data[1];
+	} BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3   
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN 12  
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+	uint8 id;
+	uint8 len;
+	uint8 count;    
+	uint8 period;   
+	uint16 duration;    
+	uint16 offset;  
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+	uint8 channel;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+	uint8 id;
+	uint8 len;
+	uint8 eaddr[ETHER_ADDR_LEN];
+	uint8 interval;
+	chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+
+#define WME_OUI         "\x00\x50\xf2"  
+#define WME_OUI_LEN     3
+#define WME_OUI_TYPE        2   
+#define WME_VER         1   
+#define WME_TYPE        2   
+#define WME_SUBTYPE_IE      0   
+#define WME_SUBTYPE_PARAM_IE    1   
+#define WME_SUBTYPE_TSPEC   2   
+
+
+#define AC_BE           0   
+#define AC_BK           1   
+#define AC_VI           2   
+#define AC_VO           3   
+#define AC_COUNT        4   
+
+typedef uint8 ac_bitmap_t;  
+
+#define AC_BITMAP_NONE      0x0 
+#define AC_BITMAP_ALL       0xf 
+#define AC_BITMAP_TST(ab, ac)   (((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac)   (((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
+
+
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7    
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+	uint8   ACI;
+	uint8   ECW;
+	uint16  TXOP;       
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN            24          
+
+
+#define WME_QI_AP_APSD_MASK         0x80        
+#define WME_QI_AP_APSD_SHIFT        7           
+#define WME_QI_AP_COUNT_MASK        0x0f        
+#define WME_QI_AP_COUNT_SHIFT       0           
+
+
+#define WME_QI_STA_MAXSPLEN_MASK    0x60        
+#define WME_QI_STA_MAXSPLEN_SHIFT   5           
+#define WME_QI_STA_APSD_ALL_MASK    0xf         
+#define WME_QI_STA_APSD_ALL_SHIFT   0           
+#define WME_QI_STA_APSD_BE_MASK     0x8         
+#define WME_QI_STA_APSD_BE_SHIFT    3           
+#define WME_QI_STA_APSD_BK_MASK     0x4         
+#define WME_QI_STA_APSD_BK_SHIFT    2           
+#define WME_QI_STA_APSD_VI_MASK     0x2         
+#define WME_QI_STA_APSD_VI_SHIFT    1           
+#define WME_QI_STA_APSD_VO_MASK     0x1         
+#define WME_QI_STA_APSD_VO_SHIFT    0           
+
+
+#define EDCF_AIFSN_MIN               1           
+#define EDCF_AIFSN_MAX               15          
+#define EDCF_AIFSN_MASK              0x0f        
+#define EDCF_ACM_MASK                0x10        
+#define EDCF_ACI_MASK                0x60        
+#define EDCF_ACI_SHIFT               5           
+#define EDCF_AIFSN_SHIFT             12          
+
+
+#define EDCF_ECW_MIN                 0           
+#define EDCF_ECW_MAX                 15          
+#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK             0x0f        
+#define EDCF_ECWMAX_MASK             0xf0        
+#define EDCF_ECWMAX_SHIFT            4           
+
+
+#define EDCF_TXOP_MIN                0           
+#define EDCF_TXOP_MAX                65535       
+#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
+
+
+#define NON_EDCF_AC_BE_ACI_STA          0x02
+
+
+#define EDCF_AC_BE_ACI_STA           0x03   
+#define EDCF_AC_BE_ECW_STA           0xA4   
+#define EDCF_AC_BE_TXOP_STA          0x0000 
+#define EDCF_AC_BK_ACI_STA           0x27   
+#define EDCF_AC_BK_ECW_STA           0xA4   
+#define EDCF_AC_BK_TXOP_STA          0x0000 
+#define EDCF_AC_VI_ACI_STA           0x42   
+#define EDCF_AC_VI_ECW_STA           0x43   
+#define EDCF_AC_VI_TXOP_STA          0x005e 
+#define EDCF_AC_VO_ACI_STA           0x62   
+#define EDCF_AC_VO_ECW_STA           0x32   
+#define EDCF_AC_VO_TXOP_STA          0x002f 
+
+
+#define EDCF_AC_BE_ACI_AP            0x03   
+#define EDCF_AC_BE_ECW_AP            0x64   
+#define EDCF_AC_BE_TXOP_AP           0x0000 
+#define EDCF_AC_BK_ACI_AP            0x27   
+#define EDCF_AC_BK_ECW_AP            0xA4   
+#define EDCF_AC_BK_TXOP_AP           0x0000 
+#define EDCF_AC_VI_ACI_AP            0x41   
+#define EDCF_AC_VI_ECW_AP            0x43   
+#define EDCF_AC_VI_TXOP_AP           0x005e 
+#define EDCF_AC_VO_ACI_AP            0x61   
+#define EDCF_AC_VO_ECW_AP            0x32   
+#define EDCF_AC_VO_TXOP_AP           0x002f 
+
+
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN            18          
+
+
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+	uint8 id;           
+	uint8 length;
+	uint16 station_count;       
+	uint8 channel_utilization;  
+	uint16 aac;             
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+
+
+#define FIXED_MSDU_SIZE 0x8000      
+#define MSDU_SIZE_MASK  0x7fff      
+
+
+
+#define INTEGER_SHIFT   13  
+#define FRACTION_MASK   0x1FFF  
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+	uint8 category;         
+	uint8 action;
+	uint8 token;
+	uint8 status;
+	uint8 data[1];          
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4   
+
+
+#define WME_ADDTS_REQUEST   0   
+#define WME_ADDTS_RESPONSE  1   
+#define WME_DELTS_REQUEST   2   
+
+
+#define WME_ADMISSION_ACCEPTED      0   
+#define WME_INVALID_PARAMETERS      1   
+#define WME_ADMISSION_REFUSED       3   
+
+
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+
+#define DOT11_OPEN_SYSTEM   0   
+#define DOT11_SHARED_KEY    1   
+#define DOT11_OPEN_SHARED   2   
+#define DOT11_FAST_BSS      3   
+#define DOT11_CHALLENGE_LEN 128 
+
+
+#define FC_PVER_MASK        0x3 
+#define FC_PVER_SHIFT       0   
+#define FC_TYPE_MASK        0xC 
+#define FC_TYPE_SHIFT       2   
+#define FC_SUBTYPE_MASK     0xF0    
+#define FC_SUBTYPE_SHIFT    4   
+#define FC_TODS         0x100   
+#define FC_TODS_SHIFT       8   
+#define FC_FROMDS       0x200   
+#define FC_FROMDS_SHIFT     9   
+#define FC_MOREFRAG     0x400   
+#define FC_MOREFRAG_SHIFT   10  
+#define FC_RETRY        0x800   
+#define FC_RETRY_SHIFT      11  
+#define FC_PM           0x1000  
+#define FC_PM_SHIFT     12  
+#define FC_MOREDATA     0x2000  
+#define FC_MOREDATA_SHIFT   13  
+#define FC_WEP          0x4000  
+#define FC_WEP_SHIFT        14  
+#define FC_ORDER        0x8000  
+#define FC_ORDER_SHIFT      15  
+
+
+#define SEQNUM_SHIFT        4   
+#define SEQNUM_MAX      0x1000  
+#define FRAGNUM_MASK        0xF 
+
+
+
+
+#define FC_TYPE_MNG     0   
+#define FC_TYPE_CTL     1   
+#define FC_TYPE_DATA        2   
+
+
+#define FC_SUBTYPE_ASSOC_REQ        0   
+#define FC_SUBTYPE_ASSOC_RESP       1   
+#define FC_SUBTYPE_REASSOC_REQ      2   
+#define FC_SUBTYPE_REASSOC_RESP     3   
+#define FC_SUBTYPE_PROBE_REQ        4   
+#define FC_SUBTYPE_PROBE_RESP       5   
+#define FC_SUBTYPE_BEACON       8   
+#define FC_SUBTYPE_ATIM         9   
+#define FC_SUBTYPE_DISASSOC     10  
+#define FC_SUBTYPE_AUTH         11  
+#define FC_SUBTYPE_DEAUTH       12  
+#define FC_SUBTYPE_ACTION       13  
+#define FC_SUBTYPE_ACTION_NOACK     14  
+
+
+#define FC_SUBTYPE_CTL_WRAPPER      7   
+#define FC_SUBTYPE_BLOCKACK_REQ     8   
+#define FC_SUBTYPE_BLOCKACK     9   
+#define FC_SUBTYPE_PS_POLL      10  
+#define FC_SUBTYPE_RTS          11  
+#define FC_SUBTYPE_CTS          12  
+#define FC_SUBTYPE_ACK          13  
+#define FC_SUBTYPE_CF_END       14  
+#define FC_SUBTYPE_CF_END_ACK       15  
+
+
+#define FC_SUBTYPE_DATA         0   
+#define FC_SUBTYPE_DATA_CF_ACK      1   
+#define FC_SUBTYPE_DATA_CF_POLL     2   
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3   
+#define FC_SUBTYPE_NULL         4   
+#define FC_SUBTYPE_CF_ACK       5   
+#define FC_SUBTYPE_CF_POLL      6   
+#define FC_SUBTYPE_CF_ACK_POLL      7   
+#define FC_SUBTYPE_QOS_DATA     8   
+#define FC_SUBTYPE_QOS_DATA_CF_ACK  9   
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10  
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11  
+#define FC_SUBTYPE_QOS_NULL     12  
+#define FC_SUBTYPE_QOS_CF_POLL      14  
+#define FC_SUBTYPE_QOS_CF_ACK_POLL  15  
+
+
+#define FC_SUBTYPE_ANY_QOS(s)       (((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s)      (((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s)   (((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s)    (((s) & 1) != 0)
+
+
+#define FC_KIND_MASK        (FC_TYPE_MASK | FC_SUBTYPE_MASK)    
+
+#define FC_KIND(t, s)   (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))    
+
+#define FC_SUBTYPE(fc)  (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)  
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)    
+
+#define FC_ASSOC_REQ    FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)  
+#define FC_ASSOC_RESP   FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) 
+#define FC_REASSOC_REQ  FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)    
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)   
+#define FC_PROBE_REQ    FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)  
+#define FC_PROBE_RESP   FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) 
+#define FC_BEACON   FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)     
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)   
+#define FC_AUTH     FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)       
+#define FC_DEAUTH   FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)     
+#define FC_ACTION   FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)     
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)   
+
+#define FC_CTL_WRAPPER  FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)    
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)   
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)   
+#define FC_PS_POLL  FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)    
+#define FC_RTS      FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)        
+#define FC_CTS      FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)        
+#define FC_ACK      FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)        
+#define FC_CF_END   FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)     
+#define FC_CF_END_ACK   FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) 
+
+#define FC_DATA     FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)      
+#define FC_NULL_DATA    FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)      
+#define FC_DATA_CF_ACK  FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)   
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)  
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)  
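
For illustration (not part of the patch), the FC_* macros above classify a
received frame by its host-order frame-control field:

	/* types come from <typedefs.h>, already included by this header */
	static bool is_beacon(uint16 fc)
	{
		/* equivalent to: FC_TYPE(fc) == FC_TYPE_MNG &&
		 *                FC_SUBTYPE(fc) == FC_SUBTYPE_BEACON
		 */
		return (fc & FC_KIND_MASK) == FC_BEACON;
	}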
+
+
+
+
+#define QOS_PRIO_SHIFT      0   
+#define QOS_PRIO_MASK       0x0007  
+#define QOS_PRIO(qos)       (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) 
+
+
+#define QOS_TID_SHIFT       0   
+#define QOS_TID_MASK        0x000f  
+#define QOS_TID(qos)        (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)   
+
+
+#define QOS_EOSP_SHIFT      4   
+#define QOS_EOSP_MASK       0x0010  
+#define QOS_EOSP(qos)       (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) 
+
+
+#define QOS_ACK_NORMAL_ACK  0   
+#define QOS_ACK_NO_ACK      1   
+#define QOS_ACK_NO_EXP_ACK  2   
+#define QOS_ACK_BLOCK_ACK   3   
+#define QOS_ACK_SHIFT       5   
+#define QOS_ACK_MASK        0x0060  
+#define QOS_ACK(qos)        (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)   
+
+
+#define QOS_AMSDU_SHIFT     7   
+#define QOS_AMSDU_MASK      0x0080  
+
+
+
+
+
+
+#define DOT11_MNG_AUTH_ALGO_LEN     2   
+#define DOT11_MNG_AUTH_SEQ_LEN      2   
+#define DOT11_MNG_BEACON_INT_LEN    2   
+#define DOT11_MNG_CAP_LEN       2   
+#define DOT11_MNG_AP_ADDR_LEN       6   
+#define DOT11_MNG_LISTEN_INT_LEN    2   
+#define DOT11_MNG_REASON_LEN        2   
+#define DOT11_MNG_AID_LEN       2   
+#define DOT11_MNG_STATUS_LEN        2   
+#define DOT11_MNG_TIMESTAMP_LEN     8   
+
+
+#define DOT11_AID_MASK          0x3fff  
+
+
+#define DOT11_RC_RESERVED       0   
+#define DOT11_RC_UNSPECIFIED        1   
+#define DOT11_RC_AUTH_INVAL     2   
+#define DOT11_RC_DEAUTH_LEAVING     3   
+#define DOT11_RC_INACTIVITY     4   
+#define DOT11_RC_BUSY           5   
+#define DOT11_RC_INVAL_CLASS_2      6   
+#define DOT11_RC_INVAL_CLASS_3      7   
+#define DOT11_RC_DISASSOC_LEAVING   8   
+#define DOT11_RC_NOT_AUTH       9   
+#define DOT11_RC_BAD_PC         10  
+#define DOT11_RC_BAD_CHANNELS       11  
+
+
+
+#define DOT11_RC_UNSPECIFIED_QOS    32  
+#define DOT11_RC_INSUFFCIENT_BW     33  
+#define DOT11_RC_EXCESSIVE_FRAMES   34  
+#define DOT11_RC_TX_OUTSIDE_TXOP    35  
+#define DOT11_RC_LEAVING_QBSS       36  
+#define DOT11_RC_BAD_MECHANISM      37  
+#define DOT11_RC_SETUP_NEEDED       38  
+#define DOT11_RC_TIMEOUT        39  
+
+#define DOT11_RC_MAX            23  
+
+
+#define DOT11_SC_SUCCESS        0   
+#define DOT11_SC_FAILURE        1   
+#define DOT11_SC_CAP_MISMATCH       10  
+#define DOT11_SC_REASSOC_FAIL       11  
+#define DOT11_SC_ASSOC_FAIL     12  
+#define DOT11_SC_AUTH_MISMATCH      13  
+#define DOT11_SC_AUTH_SEQ       14  
+#define DOT11_SC_AUTH_CHALLENGE_FAIL    15  
+#define DOT11_SC_AUTH_TIMEOUT       16  
+#define DOT11_SC_ASSOC_BUSY_FAIL    17  
+#define DOT11_SC_ASSOC_RATE_MISMATCH    18  
+#define DOT11_SC_ASSOC_SHORT_REQUIRED   19  
+#define DOT11_SC_ASSOC_PBCC_REQUIRED    20  
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21  
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED    22  
+#define DOT11_SC_ASSOC_BAD_POWER_CAP    23  
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24  
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED   25  
+#define DOT11_SC_ASSOC_ERPBCC_REQUIRED  26  
+#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27  
+
+#define DOT11_SC_DECLINED       37  
+#define DOT11_SC_INVALID_PARAMS     38  
+#define DOT11_SC_INVALID_AKMP       43  
+#define DOT11_SC_INVALID_MDID       54  
+#define DOT11_SC_INVALID_FTIE       55  
+
+
+#define DOT11_MNG_DS_PARAM_LEN          1   
+#define DOT11_MNG_IBSS_PARAM_LEN        2   
+
+
+#define DOT11_MNG_TIM_FIXED_LEN         3   
+#define DOT11_MNG_TIM_DTIM_COUNT        0   
+#define DOT11_MNG_TIM_DTIM_PERIOD       1   
+#define DOT11_MNG_TIM_BITMAP_CTL        2   
+#define DOT11_MNG_TIM_PVB           3   
+
+
+#define TLV_TAG_OFF     0   
+#define TLV_LEN_OFF     1   
+#define TLV_HDR_LEN     2   
+#define TLV_BODY_OFF        2   
+
+
+#define DOT11_MNG_SSID_ID           0   
+#define DOT11_MNG_RATES_ID          1   
+#define DOT11_MNG_FH_PARMS_ID           2   
+#define DOT11_MNG_DS_PARMS_ID           3   
+#define DOT11_MNG_CF_PARMS_ID           4   
+#define DOT11_MNG_TIM_ID            5   
+#define DOT11_MNG_IBSS_PARMS_ID         6   
+#define DOT11_MNG_COUNTRY_ID            7   
+#define DOT11_MNG_HOPPING_PARMS_ID      8   
+#define DOT11_MNG_HOPPING_TABLE_ID      9   
+#define DOT11_MNG_REQUEST_ID            10  
+#define DOT11_MNG_QBSS_LOAD_ID          11  
+#define DOT11_MNG_EDCA_PARAM_ID         12  
+#define DOT11_MNG_CHALLENGE_ID          16  
+#define DOT11_MNG_PWR_CONSTRAINT_ID     32  
+#define DOT11_MNG_PWR_CAP_ID            33  
+#define DOT11_MNG_TPC_REQUEST_ID        34  
+#define DOT11_MNG_TPC_REPORT_ID         35  
+#define DOT11_MNG_SUPP_CHANNELS_ID      36  
+#define DOT11_MNG_CHANNEL_SWITCH_ID     37  
+#define DOT11_MNG_MEASURE_REQUEST_ID        38  
+#define DOT11_MNG_MEASURE_REPORT_ID     39  
+#define DOT11_MNG_QUIET_ID          40  
+#define DOT11_MNG_IBSS_DFS_ID           41  
+#define DOT11_MNG_ERP_ID            42  
+#define DOT11_MNG_TS_DELAY_ID           43  
+#define DOT11_MNG_HT_CAP            45  
+#define DOT11_MNG_QOS_CAP_ID            46  
+#define DOT11_MNG_NONERP_ID         47  
+#define DOT11_MNG_RSN_ID            48  
+#define DOT11_MNG_EXT_RATES_ID          50  
+#define DOT11_MNG_AP_CHREP_ID       51  
+#define DOT11_MNG_NBR_REP_ID        52  
+#define DOT11_MNG_MDIE_ID       54  
+#define DOT11_MNG_FTIE_ID       55  
+#define DOT11_MNG_FT_TI_ID      56  
+#define DOT11_MNG_REGCLASS_ID           59  
+#define DOT11_MNG_EXT_CSA_ID            60  
+#define DOT11_MNG_HT_ADD            61  
+#define DOT11_MNG_EXT_CHANNEL_OFFSET        62  
+
+
+#define DOT11_MNG_RRM_CAP_ID        70  
+#define DOT11_MNG_HT_BSS_COEXINFO_ID        72  
+#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID  73  
+#define DOT11_MNG_HT_OBSS_ID            74  
+#define DOT11_MNG_EXT_CAP           127 
+#define DOT11_MNG_WPA_ID            221 
+#define DOT11_MNG_PROPR_ID          221 
+
+#define DOT11_MNG_VS_ID             221 
+
+
+#define DOT11_RATE_BASIC            0x80    
+#define DOT11_RATE_MASK             0x7F    
+
+
+#define DOT11_MNG_ERP_LEN           1   
+#define DOT11_MNG_NONERP_PRESENT        0x01    
+#define DOT11_MNG_USE_PROTECTION        0x02    
+#define DOT11_MNG_BARKER_PREAMBLE       0x04    
+
+#define DOT11_MGN_TS_DELAY_LEN      4   
+#define TS_DELAY_FIELD_SIZE         4   
+
+
+#define DOT11_CAP_ESS               0x0001  
+#define DOT11_CAP_IBSS              0x0002  
+#define DOT11_CAP_POLLABLE          0x0004  
+#define DOT11_CAP_POLL_RQ           0x0008  
+#define DOT11_CAP_PRIVACY           0x0010  
+#define DOT11_CAP_SHORT             0x0020  
+#define DOT11_CAP_PBCC              0x0040  
+#define DOT11_CAP_AGILITY           0x0080  
+#define DOT11_CAP_SPECTRUM          0x0100  
+#define DOT11_CAP_SHORTSLOT         0x0400  
+#define DOT11_CAP_RRM           0x1000  
+#define DOT11_CAP_CCK_OFDM          0x2000  
+
+
+#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01    
+
+
+#define DOT11_ACTION_HDR_LEN        2   
+#define DOT11_ACTION_CAT_OFF        0   
+#define DOT11_ACTION_ACT_OFF        1   
+
+
+#define DOT11_ACTION_CAT_ERR_MASK   0x80    
+#define DOT11_ACTION_CAT_MASK       0x7F    
+#define DOT11_ACTION_CAT_SPECT_MNG  0   
+#define DOT11_ACTION_CAT_QOS        1   
+#define DOT11_ACTION_CAT_DLS        2   
+#define DOT11_ACTION_CAT_BLOCKACK   3   
+#define DOT11_ACTION_CAT_PUBLIC     4   
+#define DOT11_ACTION_CAT_RRM        5   
+#define DOT11_ACTION_CAT_FBT    6   
+#define DOT11_ACTION_CAT_HT     7   
+#define DOT11_ACTION_CAT_BSSMGMT    10  
+#define DOT11_ACTION_NOTIFICATION   17
+#define DOT11_ACTION_CAT_VS     127 
+
+
+#define DOT11_SM_ACTION_M_REQ       0   
+#define DOT11_SM_ACTION_M_REP       1   
+#define DOT11_SM_ACTION_TPC_REQ     2   
+#define DOT11_SM_ACTION_TPC_REP     3   
+#define DOT11_SM_ACTION_CHANNEL_SWITCH  4   
+#define DOT11_SM_ACTION_EXT_CSA     5   
+
+
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0   
+#define DOT11_ACTION_ID_HT_MIMO_PS  1   
+
+
+#define DOT11_PUB_ACTION_BSS_COEX_MNG   0   
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4   
+
+
+#define DOT11_BA_ACTION_ADDBA_REQ   0   
+#define DOT11_BA_ACTION_ADDBA_RESP  1   
+#define DOT11_BA_ACTION_DELBA       2   
+
+
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001  
+#define DOT11_ADDBA_PARAM_POLICY_MASK   0x0002  
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT  1   
+#define DOT11_ADDBA_PARAM_TID_MASK  0x003c  
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2   
+#define DOT11_ADDBA_PARAM_BSIZE_MASK    0xffc0  
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT   6   
+
+#define DOT11_ADDBA_POLICY_DELAYED  0   
+#define DOT11_ADDBA_POLICY_IMMEDIATE    1   
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category;             
+	uint8 action;               
+	uint8 token;                
+	uint16 addba_param_set;         
+	uint16 timeout;             
+	uint16 start_seqnum;            
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN     9   
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category;             
+	uint8 action;               
+	uint8 token;                
+	uint16 status;              
+	uint16 addba_param_set;         
+	uint16 timeout;             
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN        9   
+
+
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800  
+#define DOT11_DELBA_PARAM_INIT_SHIFT    11  
+#define DOT11_DELBA_PARAM_TID_MASK  0xf000  
+#define DOT11_DELBA_PARAM_TID_SHIFT 12  
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+	uint8 category;             
+	uint8 action;               
+	uint16 delba_param_set;         
+	uint16 reason;              
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN         6   
+
+
+
+
+
+#define DOT11_RRM_CAP_LEN       5   
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+	uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+
+#define DOT11_RRM_CAP_LINK          0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT   1
+#define DOT11_RRM_CAP_PARALLEL      2
+#define DOT11_RRM_CAP_REPEATED      3
+#define DOT11_RRM_CAP_BCN_PASSIVE   4
+#define DOT11_RRM_CAP_BCN_ACTIVE    5
+#define DOT11_RRM_CAP_BCN_TABLE     6
+#define DOT11_RRM_CAP_BCN_REP_COND  7
+#define DOT11_RRM_CAP_AP_CHANREP    16
+
+
+#define DOT11_RM_ACTION_RM_REQ      0   
+#define DOT11_RM_ACTION_RM_REP      1   
+#define DOT11_RM_ACTION_LM_REQ      2   
+#define DOT11_RM_ACTION_LM_REP      3   
+#define DOT11_RM_ACTION_NR_REQ      4   
+#define DOT11_RM_ACTION_NR_REP      5   
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+	uint8 category;             
+	uint8 action;               
+	uint8 token;                
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+	uint8 category;             
+	uint8 action;               
+	uint8 token;                
+	uint16 reps;                
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN 5
+
+
+#define DOT11_RMREQ_MODE_PARALLEL   1
+#define DOT11_RMREQ_MODE_ENABLE     2
+#define DOT11_RMREQ_MODE_REQUEST    4
+#define DOT11_RMREQ_MODE_REPORT     8
+#define DOT11_RMREQ_MODE_DURMAND    0x10    
+
+
+#define DOT11_RMREP_MODE_LATE       1
+#define DOT11_RMREP_MODE_INCAPABLE  2
+#define DOT11_RMREP_MODE_REFUSED    4
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 bcn_mode;
+	struct ether_addr   bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN 18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 frame_info;
+	uint8 rcpi;
+	uint8 rsni;
+	struct ether_addr   bssid;
+	uint8 antenna_id;
+	uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN 26
+
+
+#define DOT11_RMREQ_BCN_PASSIVE 0
+#define DOT11_RMREQ_BCN_ACTIVE  1
+#define DOT11_RMREQ_BCN_TABLE   2
+
+
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID  1
+#define DOT11_RMREQ_BCN_REPDET_ID   2
+#define DOT11_RMREQ_BCN_REQUEST_ID  10
+#define DOT11_RMREQ_BCN_APCHREP_ID  51
+
+
+#define DOT11_RMREQ_BCN_REPDET_FIXED    0   
+#define DOT11_RMREQ_BCN_REPDET_REQUEST  1   
+#define DOT11_RMREQ_BCN_REPDET_ALL  2   
+
+
+#define DOT11_RMREP_BCN_FRM_BODY    1
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_nbr {
+	struct ether_addr   bssid;
+	uint32  bssid_info;
+	uint8 reg;
+	uint8 channel;
+	uint8 phytype;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_nbr dot11_rmrep_nbr_t;
+#define DOT11_RMREP_NBR_LEN 13
+
+
+#define DOT11_BSSTYPE_INFRASTRUCTURE        0   
+#define DOT11_BSSTYPE_INDEPENDENT       1   
+#define DOT11_BSSTYPE_ANY           2   
+#define DOT11_SCANTYPE_ACTIVE           0   
+#define DOT11_SCANTYPE_PASSIVE          1   
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+	uint8 category;             
+	uint8 action;               
+	uint8 token;                
+	uint8 txpwr;                
+	uint8 maxtxpwr;             
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+	uint8 category;             
+	uint8 action;               
+	uint8 token;                
+	dot11_tpc_rep_t tpc;            
+	uint8 rxant;                
+	uint8 txant;                
+	uint8 rcpi;             
+	uint8 rsni;             
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN 11
+
+
+#define PREN_PREAMBLE       24  
+#define PREN_MM_EXT     12  
+#define PREN_PREAMBLE_EXT   4   
+
+
+#define RIFS_11N_TIME       2   
+
+
+
+#define HT_SIG1_MCS_MASK        0x00007F
+#define HT_SIG1_CBW             0x000080
+#define HT_SIG1_HT_LENGTH       0xFFFF00
+
+
+#define HT_SIG2_SMOOTHING       0x000001
+#define HT_SIG2_NOT_SOUNDING    0x000002
+#define HT_SIG2_RESERVED        0x000004
+#define HT_SIG2_AGGREGATION     0x000008
+#define HT_SIG2_STBC_MASK       0x000030
+#define HT_SIG2_STBC_SHIFT      4
+#define HT_SIG2_FEC_CODING      0x000040
+#define HT_SIG2_SHORT_GI        0x000080
+#define HT_SIG2_ESS_MASK        0x000300
+#define HT_SIG2_ESS_SHIFT       8
+#define HT_SIG2_CRC             0x03FC00
+#define HT_SIG2_TAIL            0x1C0000
+
+
+#define APHY_SLOT_TIME      9   
+#define APHY_SIFS_TIME      16  
+#define APHY_DIFS_TIME      (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) 
+#define APHY_PREAMBLE_TIME  16  
+#define APHY_SIGNAL_TIME    4   
+#define APHY_SYMBOL_TIME    4   
+#define APHY_SERVICE_NBITS  16  
+#define APHY_TAIL_NBITS     6   
+#define APHY_CWMIN      15  
+
+
+#define BPHY_SLOT_TIME      20  
+#define BPHY_SIFS_TIME      10  
+#define BPHY_DIFS_TIME      50  
+#define BPHY_PLCP_TIME      192 
+#define BPHY_PLCP_SHORT_TIME    96  
+#define BPHY_CWMIN      31  
+
+
+#define DOT11_OFDM_SIGNAL_EXTENSION 6   
+
+#define PHY_CWMAX       1023    
+
+#define DOT11_MAXNUMFRAGS   16  
+
+
+typedef struct d11cnt {
+	uint32      txfrag;     
+	uint32      txmulti;    
+	uint32      txfail;     
+	uint32      txretry;    
+	uint32      txretrie;   
+	uint32      rxdup;      
+	uint32      txrts;      
+	uint32      txnocts;    
+	uint32      txnoack;    
+	uint32      rxfrag;     
+	uint32      rxmulti;    
+	uint32      rxcrc;      
+	uint32      txfrmsnt;   
+	uint32      rxundec;    
+} d11cnt_t;
+
+
+#define BRCM_PROP_OUI       "\x00\x90\x4C"  
+
+
+
+#define BRCM_OUI        "\x00\x10\x18"  
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+	uint8   id;     
+	uint8   len;        
+	uint8   oui[3];     
+	uint8   ver;        
+	uint8   assoc;      
+	uint8   flags;      
+	uint8   flags1;     
+	uint16  amsdu_mtu_pref; 
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN     11  
+#define BRCM_IE_VER     2   
+#define BRCM_IE_LEGACY_AES_VER  1   
+
+
+#ifdef WLAFTERBURNER
+#define BRF_ABCAP       0x1 
+#define BRF_ABRQRD      0x2 
+#define BRF_ABCOUNTER_MASK  0xf0    
+#define BRF_ABCOUNTER_SHIFT 4   
+#endif 
+#define BRF_LZWDS       0x4 
+#define BRF_BLOCKACK        0x8 
+
+
+#define BRF1_AMSDU      0x1 
+#define BRF1_WMEPS      0x4 
+#define BRF1_PSOFIX     0x8 
+#define BRF1_RX_LARGE_AGG   0x10    
+#define BRF1_SOFTAP             0x40    
+
+#ifdef WLAFTERBURNER
+#define AB_WDS_TIMEOUT_MAX  15  
+#define AB_WDS_TIMEOUT_MIN  1   
+#endif
+
+#define AB_GUARDCOUNT   10      
+
+
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+	uchar id;
+	uchar len;
+	uchar oui [3];
+	uchar data [1];     
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN     2   
+#define VNDR_IE_MIN_LEN     3   
+#define VNDR_IE_MAX_LEN     256 
+
+
+#define MCSSET_LEN  16  
+#define MAX_MCS_NUM (128)   
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+	uint16  cap;
+	uint8   params;
+	uint8   supp_mcs[MCSSET_LEN];
+	uint16  ext_htcap;
+	uint32  txbf_cap;
+	uint8   as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+	uint8   id;     
+	uint8   len;        
+	uint8   oui[3];     
+	uint8   type;           
+	ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD 4   
+#define HT_CAP_IE_LEN       26  
+#define HT_CAP_IE_TYPE      51
+
+#define HT_CAP_LDPC_CODING  0x0001  
+#define HT_CAP_40MHZ        0x0002  
+#define HT_CAP_MIMO_PS_MASK 0x000C  
+#define HT_CAP_MIMO_PS_SHIFT    0x0002  
+#define HT_CAP_MIMO_PS_OFF  0x0003  
+#define HT_CAP_MIMO_PS_RTS  0x0001  
+#define HT_CAP_MIMO_PS_ON   0x0000  
+#define HT_CAP_GF       0x0010  
+#define HT_CAP_SHORT_GI_20  0x0020  
+#define HT_CAP_SHORT_GI_40  0x0040  
+#define HT_CAP_TX_STBC      0x0080  
+#define HT_CAP_RX_STBC_MASK 0x0300  
+#define HT_CAP_RX_STBC_SHIFT    8   
+#define HT_CAP_DELAYED_BA   0x0400  
+#define HT_CAP_MAX_AMSDU    0x0800  
+#define HT_CAP_DSSS_CCK 0x1000  
+#define HT_CAP_PSMP     0x2000  
+#define HT_CAP_40MHZ_INTOLERANT 0x4000  
+#define HT_CAP_LSIG_TXOP    0x8000  
+
+#define HT_CAP_RX_STBC_NO       0x0 
+#define HT_CAP_RX_STBC_ONE_STREAM   0x1 
+#define HT_CAP_RX_STBC_TWO_STREAM   0x2 
+#define HT_CAP_RX_STBC_THREE_STREAM 0x3 
+
+#define HT_MAX_AMSDU        7935    
+#define HT_MIN_AMSDU        3835    
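+
+/*
+ * Usage sketch (illustrative): decoding a few HT capability fields from a
+ * received ht_cap_ie_t, using the ltoh16_ua() unaligned little-endian
+ * helper that the HT opmode macros further below also rely on.
+ *
+ *	uint16 cap       = ltoh16_ua(&ht_cap->cap);
+ *	bool forty_mhz   = (cap & HT_CAP_40MHZ) != 0;
+ *	bool sgi20       = (cap & HT_CAP_SHORT_GI_20) != 0;
+ *	uint8 rx_stbc    = (cap & HT_CAP_RX_STBC_MASK) >> HT_CAP_RX_STBC_SHIFT;
+ *	uint16 max_amsdu = (cap & HT_CAP_MAX_AMSDU) ? HT_MAX_AMSDU : HT_MIN_AMSDU;
+ */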
+
+#define HT_PARAMS_RX_FACTOR_MASK    0x03    
+#define HT_PARAMS_DENSITY_MASK      0x1C    
+#define HT_PARAMS_DENSITY_SHIFT 2   
+
+
+#define AMPDU_MAX_MPDU_DENSITY  7   
+#define AMPDU_RX_FACTOR_8K  0   
+#define AMPDU_RX_FACTOR_16K 1   
+#define AMPDU_RX_FACTOR_32K 2   
+#define AMPDU_RX_FACTOR_64K 3   
+#define AMPDU_RX_FACTOR_BASE    (8 * 1024)	/* 2^13 octets */
+
+#define AMPDU_DELIMITER_LEN 4   
+#define AMPDU_DELIMITER_LEN_MAX 63  
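+
+/*
+ * Usage sketch (illustrative): the params byte of ht_cap_ie_t carries the
+ * A-MPDU RX factor and minimum MPDU start spacing; the advertised maximum
+ * A-MPDU length is 2^(13 + factor) - 1 octets, i.e.
+ * (AMPDU_RX_FACTOR_BASE << factor) - 1.
+ *
+ *	uint8 factor  = ht_cap->params & HT_PARAMS_RX_FACTOR_MASK;
+ *	uint8 density = (ht_cap->params & HT_PARAMS_DENSITY_MASK) >>
+ *	                HT_PARAMS_DENSITY_SHIFT;
+ *	uint32 max_ampdu_len = (AMPDU_RX_FACTOR_BASE << factor) - 1;
+ */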
+
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+	uint8   ctl_ch;         
+	uint8   byte1;          
+	uint16  opmode;         
+	uint16  misc_bits;      
+	uint8   basic_mcs[MCSSET_LEN];  
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+	uint8   id;     
+	uint8   len;        
+	uint8   oui[3];     
+	uint8   type;       
+	ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN   22
+#define HT_ADD_IE_TYPE  52
+
+
+#define HT_BW_ANY       0x04    
+#define HT_RIFS_PERMITTED       0x08    
+
+
+#define HT_OPMODE_MASK          0x0003  
+#define HT_OPMODE_SHIFT     0   
+#define HT_OPMODE_PURE      0x0000  
+#define HT_OPMODE_OPTIONAL  0x0001  
+#define HT_OPMODE_HT20IN40  0x0002  
+#define HT_OPMODE_MIXED 0x0003  
+#define HT_OPMODE_NONGF 0x0004  
+#define DOT11N_TXBURST      0x0008  
+#define DOT11N_OBSS_NONHT   0x0010  
+
+
+#define HT_BASIC_STBC_MCS   0x007f  
+#define HT_DUAL_STBC_PROT   0x0080  
+#define HT_SECOND_BCN       0x0100  
+#define HT_LSIG_TXOP        0x0200  
+#define HT_PCO_ACTIVE       0x0400  
+#define HT_PCO_PHASE        0x0800  
+
+
+#define DOT11N_2G_TXBURST_LIMIT 6160    
+#define DOT11N_5G_TXBURST_LIMIT 3080    
+
+
+#define GET_HT_OPMODE(add_ie)       ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					>> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie)    ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_MIXED) 
+#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_HT20IN40)  
+#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_OPTIONAL)  
+#define HT_USE_PROTECTION(add_ie)   (HT_HT20_PRESENT((add_ie)) || \
+					HT_MIXEDMODE_PRESENT((add_ie))) 
+#define HT_NONGF_PRESENT(add_ie)    ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+					== HT_OPMODE_NONGF) 
+#define DOT11N_TXBURST_PRESENT(add_ie)  ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+					== DOT11N_TXBURST)  
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie)   ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+					== DOT11N_OBSS_NONHT)   
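+
+/*
+ * Usage sketch (illustrative): typical use of the opmode helpers on a
+ * previously parsed ht_add_ie_t pointer; enable_ht_protection() and
+ * disallow_greenfield() are hypothetical driver hooks.
+ *
+ *	if (HT_USE_PROTECTION(add_ie))
+ *		enable_ht_protection();
+ *	if (HT_NONGF_PRESENT(add_ie))
+ *		disallow_greenfield();
+ */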
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+	uint16  passive_dwell;
+	uint16  active_dwell;
+	uint16  bss_widthscan_interval;
+	uint16  passive_total;
+	uint16  active_total;
+	uint16  chanwidth_transition_dly;
+	uint16  activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+	uint8   id;
+	uint8   len;
+	obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN  sizeof(obss_params_t)   
+
+
+#define HT_CTRL_LA_TRQ      0x00000002  
+#define HT_CTRL_LA_MAI      0x0000003C  
+#define HT_CTRL_LA_MAI_SHIFT    2
+#define HT_CTRL_LA_MAI_MRQ  0x00000004  
+#define HT_CTRL_LA_MAI_MSI  0x00000038  
+#define HT_CTRL_LA_MFSI     0x000001C0  
+#define HT_CTRL_LA_MFSI_SHIFT   6
+#define HT_CTRL_LA_MFB_ASELC    0x0000FE00  
+#define HT_CTRL_LA_MFB_ASELC_SH 9
+#define HT_CTRL_LA_ASELC_CMD    0x00000C00  
+#define HT_CTRL_LA_ASELC_DATA   0x0000F000  
+#define HT_CTRL_CAL_POS     0x00030000  
+#define HT_CTRL_CAL_SEQ     0x000C0000  
+#define HT_CTRL_CSI_STEERING    0x00C00000  
+#define HT_CTRL_CSI_STEER_SHIFT 22
+#define HT_CTRL_CSI_STEER_NFB   0       
+#define HT_CTRL_CSI_STEER_CSI   1       
+#define HT_CTRL_CSI_STEER_NCOM  2       
+#define HT_CTRL_CSI_STEER_COM   3       
+#define HT_CTRL_NDP_ANNOUNCE    0x01000000  
+#define HT_CTRL_AC_CONSTRAINT   0x40000000  
+#define HT_CTRL_RDG_MOREPPDU    0x80000000  
+
+
+
+#define WPA_OUI         "\x00\x50\xF2"  
+#define WPA_OUI_LEN     3       
+#define WPA_OUI_TYPE        1
+#define WPA_VERSION     1   
+#define WPA2_OUI        "\x00\x0F\xAC"  
+#define WPA2_OUI_LEN        3       
+#define WPA2_VERSION        1   
+#define WPA2_VERSION_LEN    2   
+
+
+#define WPS_OUI         "\x00\x50\xF2"  
+#define WPS_OUI_LEN     3       
+#define WPS_OUI_TYPE        4
+
+
+#define WFA_OUI         "\x50\x6F\x9A"  
+#define WFA_OUI_LEN 3   
+
+#define WFA_OUI_TYPE_WPA    1
+#define WFA_OUI_TYPE_WPS    4
+#define WFA_OUI_TYPE_TPC    8
+#define WFA_OUI_TYPE_P2P    9
+
+
+#define RSN_AKM_NONE        0   
+#define RSN_AKM_UNSPECIFIED 1   
+#define RSN_AKM_PSK     2   
+#define RSN_AKM_FBT_1X      3   
+#define RSN_AKM_FBT_PSK     4   
+
+
+#define DOT11_MAX_DEFAULT_KEYS  4   
+#define DOT11_MAX_KEY_SIZE  32  
+#define DOT11_MAX_IV_SIZE   16  
+#define DOT11_EXT_IV_FLAG   (1<<5)  
+#define DOT11_WPA_KEY_RSC_LEN   8       
+
+#define WEP1_KEY_SIZE       5   
+#define WEP1_KEY_HEX_SIZE   10  
+#define WEP128_KEY_SIZE     13  
+#define WEP128_KEY_HEX_SIZE 26  
+#define TKIP_MIC_SIZE       8   
+#define TKIP_EOM_SIZE       7   
+#define TKIP_EOM_FLAG       0x5a    
+#define TKIP_KEY_SIZE       32  
+#define TKIP_MIC_AUTH_TX    16  
+#define TKIP_MIC_AUTH_RX    24  
+#define TKIP_MIC_SUP_RX     TKIP_MIC_AUTH_TX    
+#define TKIP_MIC_SUP_TX     TKIP_MIC_AUTH_RX    
+#define AES_KEY_SIZE        16  
+#define AES_MIC_SIZE        8   
+
+
+#define WCN_OUI         "\x00\x50\xf2"  
+#define WCN_TYPE        4   
+
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mdid;        
+	uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS 0x01    
+#define FBT_MDID_CAP_RRP    0x02    
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mic_control;     
+	uint8 mic[16];
+	uint8 anonce[32];
+	uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+	uint8 id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_len;
+	uint8 rsc[8];
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif 
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
new file mode 100644
index 0000000..4ccfab0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
@@ -0,0 +1,45 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11_bta.h,v 9.2 2008-10-28 23:27:13 Exp $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT		"\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP				1
+#define	BTA_PROT_ACTIVITY_REPORT		2
+#define BTA_PROT_SECURITY			3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST	4
+#define BTA_PROT_LINK_SUPERVISION_REPLY		5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS			1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS		2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS		3
+#define BTA_TYPE_ID_CAPABILITIES		4
+#define BTA_TYPE_ID_VERSION			5
+#endif /* _802_11_BTA_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
new file mode 100644
index 0000000..ce8ad08
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
@@ -0,0 +1,131 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h,v 1.6 2008-12-01 22:55:11 Exp $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN           2           /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF          2           /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET	0		/* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET		1		/* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET		2		/* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET		3		/* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN];	/* WME_OUI */
+	uint8 type;					/* WME_TYPE */
+	uint8 subtype;				/* WME_SUBTYPE_TSPEC */
+	uint8 version;				/* WME_VERSION */
+	tsinfo_t tsinfo;			/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;	/* Minimum Service Interval (us) */
+	uint32 max_srv_interval;	/* Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/* Inactivity Interval (us) */
+	uint32 suspension_interval; /* Suspension Interval (us) */
+	uint32 srv_start_time;		/* Service Start Time (us) */
+	uint32 min_data_rate;		/* Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/* Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/* Peak Data Rate (bps) */
+	uint32 max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint32 delay_bound;			/* Delay Bound (us) */
+	uint32 min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;			/* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time;			/* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))		/* not including 2-bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT		1	/* TS info. TID shift */
+#define TS_INFO_TID_MASK		(0xf << TS_INFO_TID_SHIFT)	/* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT	7	/* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK	(0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT	5	/* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT		2		/* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK		(1 << TS_INFO_PSB_SHIFT)	/* TS info. PSB mask */
+#define TS_INFO_UPLINK			(0 << TS_INFO_DIRECTION_SHIFT)	/* TS info. uplink */
+#define TS_INFO_DOWNLINK		(1 << TS_INFO_DIRECTION_SHIFT)	/* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL	(3 << TS_INFO_DIRECTION_SHIFT)	/* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT	3	/* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK	(0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt)	((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt)	((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt)	((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt)	((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id)	((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio)	((((pt).octets[1]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
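+
+/*
+ * Usage sketch (illustrative): the SET macros evaluate to the updated
+ * octet value rather than writing it back, so the caller stores the
+ * result, e.g. when filling in a TSPEC:
+ *
+ *	tsinfo_t ti = { {0, 0, 0} };
+ *	ti.octets[0] = (uint8)WLC_CAC_SET_TID(ti, 6);
+ *
+ * after which WLC_CAC_GET_TID(ti) returns 6 and WLC_CAC_GET_DIR(ti)
+ * returns 0 (uplink).
+ */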
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN		5	/* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF		3	/* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT		300	/* default ADDTS response timeout in ms */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED	0	/* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM	1	/* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW	3	/* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE	47	/* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS		36	/* STA leave QBSS */
+#define DOT11E_STATUS_END_TS				37	/* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS			38	/* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT		39	/* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
new file mode 100644
index 0000000..cf20625
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h,v 9.3 2007-04-10 21:33:06 Exp $
+ */
+
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+
+#define	PRIO_8021D_NONE		2	
+#define	PRIO_8021D_BK		1	
+#define	PRIO_8021D_BE		0	
+#define	PRIO_8021D_EE		3	
+#define	PRIO_8021D_CL		4	
+#define	PRIO_8021D_VI		5	
+#define	PRIO_8021D_VO		6	
+#define	PRIO_8021D_NC		7	
+#define	MAXPRIO			7	
+#define NUMPRIO			(MAXPRIO + 1)
+
+#define ALLPRIO		-1	
+
+
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? (((prio) ^ 2)) : (prio))
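+
+/*
+ * For example, PRIO2PREC(PRIO_8021D_BE) == 2 and
+ * PRIO2PREC(PRIO_8021D_NONE) == 0; every other priority maps to itself,
+ * so Best Effort is ordered ahead of None/Default on the precedence scale.
+ */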
+
+#endif 
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
new file mode 100644
index 0000000..46fa4c9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
@@ -0,0 +1,83 @@
+/*
+ * Broadcom Ethernettype  protocol definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h,v 9.12 2009-12-29 19:57:18 Exp $
+ */
+
+
+
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+
+
+
+
+#define	BCMILCP_SUBTYPE_RATE		1
+#define	BCMILCP_SUBTYPE_LINK		2
+#define	BCMILCP_SUBTYPE_CSA		3
+#define	BCMILCP_SUBTYPE_LARQ		4
+#define BCMILCP_SUBTYPE_VENDOR		5
+#define	BCMILCP_SUBTYPE_FLH		17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG	32769
+#define BCMILCP_SUBTYPE_CERT		32770
+#define BCMILCP_SUBTYPE_SES		32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED		0
+#define BCMILCP_BCM_SUBTYPE_EVENT		1
+#define BCMILCP_BCM_SUBTYPE_SES			2
+
+
+#define BCMILCP_BCM_SUBTYPE_DPT			4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH	8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION		0
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+	uint16	subtype;	
+	uint16	length;
+	uint8	version;	
+	uint8	oui[3];		
+	
+	uint16	usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
new file mode 100644
index 0000000..7f51faa
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -0,0 +1,311 @@
+/*
+ * Broadcom Event  protocol definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h,v 9.64.2.9 2011-02-01 06:24:21 Exp $
+ *
+ */
+
+
+
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION		2	
+#define BCM_MSG_IFNAME_MAX		16	
+
+
+#define WLC_EVENT_MSG_LINK		0x01	
+#define WLC_EVENT_MSG_FLUSHTXQ		0x02	
+#define WLC_EVENT_MSG_GROUP		0x04	
+#define WLC_EVENT_MSG_UNKBSS		0x08	
+#define WLC_EVENT_MSG_UNKIF		0x10	
+
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			
+	uint32	event_type;		
+	uint32	status;			
+	uint32	reason;			
+	uint32	auth_type;		
+	uint32	datalen;		
+	struct ether_addr	addr;	
+	char	ifname[BCM_MSG_IFNAME_MAX]; 
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			
+	uint32	event_type;		
+	uint32	status;			
+	uint32	reason;			
+	uint32	auth_type;		
+	uint32	datalen;		
+	struct ether_addr	addr;	
+	char	ifname[BCM_MSG_IFNAME_MAX]; 
+	uint8	ifidx;			
+	uint8	bsscfgidx;		
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+	struct ether_header eth;
+	bcmeth_hdr_t		bcm_hdr;
+	wl_event_msg_t		event;
+	
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN	(sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
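+
+/*
+ * Usage sketch (illustrative; assumes the multi-byte event fields arrive
+ * in network byte order and that the ntoh16()/ntoh32() helpers from
+ * bcmendian.h are available; pktdata and handle_link_up() are
+ * placeholders):
+ *
+ *	bcm_event_t *be = (bcm_event_t *)pktdata;
+ *	uint32 etype = ntoh32(be->event.event_type);
+ *	uint16 flags = ntoh16(be->event.flags);
+ *	if (etype == WLC_E_LINK && (flags & WLC_EVENT_MSG_LINK))
+ *		handle_link_up();
+ */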
+
+
+#define WLC_E_SET_SSID		0	
+#define WLC_E_JOIN		1	
+#define WLC_E_START		2	
+#define WLC_E_AUTH		3	
+#define WLC_E_AUTH_IND		4	
+#define WLC_E_DEAUTH		5	
+#define WLC_E_DEAUTH_IND	6	
+#define WLC_E_ASSOC		7	
+#define WLC_E_ASSOC_IND		8	
+#define WLC_E_REASSOC		9	
+#define WLC_E_REASSOC_IND	10	
+#define WLC_E_DISASSOC		11	
+#define WLC_E_DISASSOC_IND	12	
+#define WLC_E_QUIET_START	13	
+#define WLC_E_QUIET_END		14	
+#define WLC_E_BEACON_RX		15	
+#define WLC_E_LINK		16	
+#define WLC_E_MIC_ERROR		17	
+#define WLC_E_NDIS_LINK		18	
+#define WLC_E_ROAM		19	
+#define WLC_E_TXFAIL		20	
+#define WLC_E_PMKID_CACHE	21	
+#define WLC_E_RETROGRADE_TSF	22	
+#define WLC_E_PRUNE		23	
+#define WLC_E_AUTOAUTH		24	
+#define WLC_E_EAPOL_MSG		25	
+#define WLC_E_SCAN_COMPLETE	26	
+#define WLC_E_ADDTS_IND		27	
+#define WLC_E_DELTS_IND		28	
+#define WLC_E_BCNSENT_IND	29	
+#define WLC_E_BCNRX_MSG		30	
+#define WLC_E_BCNLOST_MSG	31	
+#define WLC_E_ROAM_PREP		32	
+#define WLC_E_PFN_NET_FOUND	33	
+#define WLC_E_PFN_NET_LOST	34	
+#define WLC_E_RESET_COMPLETE	35
+#define WLC_E_JOIN_START	36
+#define WLC_E_ROAM_START	37
+#define WLC_E_ASSOC_START	38
+#define WLC_E_IBSS_ASSOC	39
+#define WLC_E_RADIO		40
+#define WLC_E_PSM_WATCHDOG	41	
+#define WLC_E_PROBREQ_MSG       44      
+#define WLC_E_SCAN_CONFIRM_IND  45
+#define WLC_E_PSK_SUP		46	
+#define WLC_E_COUNTRY_CODE_CHANGED 47
+#define	WLC_E_EXCEEDED_MEDIUM_TIME 48	
+#define WLC_E_ICV_ERROR		49	
+#define WLC_E_UNICAST_DECODE_ERROR 50	
+#define WLC_E_MULTICAST_DECODE_ERROR 51 
+#define WLC_E_TRACE		52
+#define WLC_E_BTA_HCI_EVENT	53	
+#define WLC_E_IF		54	
+#ifdef WLP2P
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE 	55	
+#endif
+#define WLC_E_RSSI		56	
+#define WLC_E_PFN_SCAN_COMPLETE	57	
+#define WLC_E_EXTLOG_MSG	58
+#define WLC_E_ACTION_FRAME      59 	
+#define WLC_E_ACTION_FRAME_COMPLETE 60	
+#define WLC_E_PRE_ASSOC_IND	61	
+#define WLC_E_PRE_REASSOC_IND	62	
+#define WLC_E_CHANNEL_ADOPTED	63
+#define WLC_E_AP_STARTED	64	
+#define WLC_E_DFS_AP_STOP	65	
+#define WLC_E_DFS_AP_RESUME	66	
+#define WLC_E_WAI_STA_EVENT	67	
+#define WLC_E_WAI_MSG 		68	
+#define WLC_E_ESCAN_RESULT 	69	
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 	70	
+#if defined(WLP2P)
+#define WLC_E_PROBRESP_MSG	71	
+#define WLC_E_P2P_PROBREQ_MSG	72	
+#endif
+#define WLC_E_DCS_REQUEST 73
+
+#define WLC_E_FIFO_CREDIT_MAP	74 
+
+#define WLC_E_ACTION_FRAME_RX	75	
+#define WLC_E_WAKE_EVENT	76	
+#define WLC_E_RM_COMPLETE	77	
+#define WLC_E_HTSFSYNC		78	
+#define WLC_E_OVERLAY_REQ	79	
+#define WLC_E_CSA_COMPLETE_IND  80
+#define WLC_E_EXCESS_PM_WAKE_EVENT	81	
+#define WLC_E_PFN_SCAN_NONE		82	
+#define WLC_E_PFN_SCAN_ALLGONE	83	
+#define WLC_E_LAST		84	
+
+
+typedef struct {
+	uint event;
+	const char *name;
+} bcmevent_name_t;
+
+extern const bcmevent_name_t	bcmevent_names[];
+extern const int		bcmevent_names_size;
+
+
+#define WLC_E_STATUS_SUCCESS		0	
+#define WLC_E_STATUS_FAIL		1	
+#define WLC_E_STATUS_TIMEOUT		2	
+#define WLC_E_STATUS_NO_NETWORKS	3	
+#define WLC_E_STATUS_ABORT		4	
+#define WLC_E_STATUS_NO_ACK		5	
+#define WLC_E_STATUS_UNSOLICITED	6	
+#define WLC_E_STATUS_ATTEMPT		7	
+#define WLC_E_STATUS_PARTIAL		8	
+#define WLC_E_STATUS_NEWSCAN		9	
+#define WLC_E_STATUS_NEWASSOC		10	
+#define WLC_E_STATUS_11HQUIET		11	
+#define WLC_E_STATUS_SUPPRESS		12	
+#define WLC_E_STATUS_NOCHANS		13	
+#define WLC_E_STATUS_CS_ABORT		15	
+#define WLC_E_STATUS_ERROR		16	
+
+
+#define WLC_E_REASON_INITIAL_ASSOC	0	
+#define WLC_E_REASON_LOW_RSSI		1	
+#define WLC_E_REASON_DEAUTH		2	
+#define WLC_E_REASON_DISASSOC		3	
+#define WLC_E_REASON_BCNS_LOST		4	
+#define WLC_E_REASON_MINTXRATE		9	
+#define WLC_E_REASON_TXFAIL		10	
+
+
+#define WLC_E_REASON_FAST_ROAM_FAILED	5	
+#define WLC_E_REASON_DIRECTED_ROAM	6	
+#define WLC_E_REASON_TSPEC_REJECTED	7	
+#define WLC_E_REASON_BETTER_AP		8	
+
+
+#define WLC_E_PRUNE_ENCR_MISMATCH	1	
+#define WLC_E_PRUNE_BCAST_BSSID		2	
+#define WLC_E_PRUNE_MAC_DENY		3	
+#define WLC_E_PRUNE_MAC_NA		4	
+#define WLC_E_PRUNE_REG_PASSV		5	
+#define WLC_E_PRUNE_SPCT_MGMT		6	
+#define WLC_E_PRUNE_RADAR		7	
+#define WLC_E_RSN_MISMATCH		8	
+#define WLC_E_PRUNE_NO_COMMON_RATES	9	
+#define WLC_E_PRUNE_BASIC_RATES		10	
+#define WLC_E_PRUNE_CIPHER_NA		12	
+#define WLC_E_PRUNE_KNOWN_STA		13	
+#define WLC_E_PRUNE_WDS_PEER		15	
+#define WLC_E_PRUNE_QBSS_LOAD		16	
+#define WLC_E_PRUNE_HOME_AP		17	
+
+
+#define WLC_E_SUP_OTHER			0	
+#define WLC_E_SUP_DECRYPT_KEY_DATA	1	
+#define WLC_E_SUP_BAD_UCAST_WEP128	2	
+#define WLC_E_SUP_BAD_UCAST_WEP40	3	
+#define WLC_E_SUP_UNSUP_KEY_LEN		4	
+#define WLC_E_SUP_PW_KEY_CIPHER		5	
+#define WLC_E_SUP_MSG3_TOO_MANY_IE	6	
+#define WLC_E_SUP_MSG3_IE_MISMATCH	7	
+#define WLC_E_SUP_NO_INSTALL_FLAG	8	
+#define WLC_E_SUP_MSG3_NO_GTK		9	
+#define WLC_E_SUP_GRP_KEY_CIPHER	10	
+#define WLC_E_SUP_GRP_MSG1_NO_GTK	11	
+#define WLC_E_SUP_GTK_DECRYPT_FAIL	12	
+#define WLC_E_SUP_SEND_FAIL		13	
+#define WLC_E_SUP_DEAUTH		14	
+#define WLC_E_SUP_WPA_PSK_TMO		15	
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+	uint16	version;
+	uint16	channel;	
+	int32	rssi;
+	uint32	mactime;
+	uint32	rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+
+typedef struct wl_event_data_if {
+	uint8 ifidx;
+	uint8 opcode;		
+	uint8 reserved;
+	uint8 bssidx;		
+	uint8 role;		
+} wl_event_data_if_t;
+
+
+#define WLC_E_IF_ADD		1	
+#define WLC_E_IF_DEL		2	
+#define WLC_E_IF_CHANGE		3	
+
+
+#define WLC_E_IF_ROLE_STA		0	
+#define WLC_E_IF_ROLE_AP		1	
+#define WLC_E_IF_ROLE_WDS		2	
+#define WLC_E_IF_ROLE_P2P_GO		3	
+#define WLC_E_IF_ROLE_P2P_CLIENT	4	
+#define WLC_E_IF_ROLE_BTA_CREATOR	5	
+#define WLC_E_IF_ROLE_BTA_ACCEPTOR	6	
+
+
+#define WLC_E_LINK_BCN_LOSS	1	
+#define WLC_E_LINK_DISASSOC	2	
+#define WLC_E_LINK_ASSOC_REC	3	
+#define WLC_E_LINK_BSSCFG_DIS	4	
+
+
+#define WLC_E_OVL_DOWNLOAD	0	
+#define WLC_E_OVL_UPDATE_IND	1	
+
+
+#include <packed_section_end.h>
+
+#endif 
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
new file mode 100644
index 0000000..8a8f314
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h,v 9.19 2009-11-10 20:08:33 Exp $
+ */
+
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define IP_VER_OFFSET		0x0	
+#define IP_VER_MASK		0xf0	
+#define IP_VER_SHIFT		4	
+#define IP_VER_4		4	
+#define IP_VER_6		6	
+
+#define IP_VER(ip_body) \
+	((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP		0x1	
+#define IP_PROT_TCP		0x6	
+#define IP_PROT_UDP		0x11	
+
+
+#define IPV4_VER_HL_OFFSET	0	
+#define IPV4_TOS_OFFSET		1	
+#define IPV4_PKTLEN_OFFSET	2	
+#define IPV4_PKTFLAG_OFFSET	6	
+#define IPV4_PROT_OFFSET	9	
+#define IPV4_CHKSUM_OFFSET	10	
+#define IPV4_SRC_IP_OFFSET	12	
+#define IPV4_DEST_IP_OFFSET	16	
+#define IPV4_OPTIONS_OFFSET	20	
+
+
+#define IPV4_VER_MASK		0xf0	
+#define IPV4_VER_SHIFT		4	
+
+#define IPV4_HLEN_MASK		0x0f	
+#define IPV4_HLEN(ipv4_body)	(4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN		4	
+
+#define IPV4_ADDR_NULL(a)	((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+				  ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a)	((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+				  ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define	IPV4_TOS_DSCP_MASK	0xfc	
+#define	IPV4_TOS_DSCP_SHIFT	2	
+
+#define	IPV4_TOS(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define	IPV4_TOS_PREC_MASK	0xe0	
+#define	IPV4_TOS_PREC_SHIFT	5	
+
+#define IPV4_TOS_LOWDELAY	0x10	
+#define IPV4_TOS_THROUGHPUT	0x8	
+#define IPV4_TOS_RELIABILITY	0x4	
+
+#define IPV4_PROT(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV		0x8000	
+#define IPV4_FRAG_DONT		0x4000	
+#define IPV4_FRAG_MORE		0x2000	
+#define IPV4_FRAG_OFFSET_MASK	0x1fff	
+
+#define IPV4_ADDR_STR_LEN	16	
+
+
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+	uint8	addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+	uint8	version_ihl;		
+	uint8	tos;			
+	uint16	tot_len;		
+	uint16	id;
+	uint16	frag;			
+	uint8	ttl;			
+	uint8	prot;			
+	uint16	hdr_chksum;		
+	uint8	src_ip[IPV4_ADDR_LEN];	
+	uint8	dst_ip[IPV4_ADDR_LEN];	
+} BWL_POST_PACKED_STRUCT;
+
+
+#define IPV6_PAYLOAD_LEN_OFFSET	4	
+#define IPV6_NEXT_HDR_OFFSET	6	
+#define IPV6_HOP_LIMIT_OFFSET	7	
+#define IPV6_SRC_IP_OFFSET	8	
+#define IPV6_DEST_IP_OFFSET	24	
+
+
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+	 ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+	 (((uint8 *)(ipv6_body))[2] << 8) | \
+	 (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+	((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+	 ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+	(((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body)	IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN		16	
+
+
+#define IP_TOS46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
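+
+/*
+ * Usage sketch (illustrative): extracting the DSCP and protocol from a
+ * packet regardless of IP version; ip_body is assumed to point at the
+ * first byte of the IP header.
+ *
+ *	uint8 tos  = IP_TOS46(ip_body);
+ *	uint8 dscp = tos >> IPV4_TOS_DSCP_SHIFT;
+ *	uint8 prot = (IP_VER(ip_body) == IP_VER_4) ?
+ *	             IPV4_PROT(ip_body) : IPV6_PROT(ip_body);
+ */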
+
+
+#include <packed_section_end.h>
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
new file mode 100644
index 0000000..89c1181
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
@@ -0,0 +1,442 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface)
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bt_amp_hci.h,v 9.14.8.2 2010-09-10 18:37:47 Exp $
+*/
+
+#ifndef _bt_amp_hci_h
+#define _bt_amp_hci_h
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* AMP HCI CMD packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd {
+	uint16 opcode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_cmd_t;
+
+#define HCI_CMD_PREAMBLE_SIZE		OFFSETOF(amp_hci_cmd_t, parms)
+#define HCI_CMD_DATA_SIZE		255
+
+/* AMP HCI CMD opcode layout */
+#define HCI_CMD_OPCODE(ogf, ocf)	((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
+#define HCI_CMD_OGF(opcode)		((uint8)(((opcode) >> 10) & 0x3F))
+#define HCI_CMD_OCF(opcode)		((opcode) & 0x03FF)
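+
+/*
+ * Example: an opcode packs the 6-bit OGF above the 10-bit OCF, so
+ * HCI_Read_Link_Quality (defined below) == HCI_CMD_OPCODE(0x05, 0x0003)
+ * == 0x1403, and HCI_CMD_OGF(0x1403) == 0x05, HCI_CMD_OCF(0x1403) == 0x0003.
+ */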
+
+/* AMP HCI command opcodes */
+#define HCI_Read_Failed_Contact_Counter		HCI_CMD_OPCODE(0x05, 0x0001)
+#define HCI_Reset_Failed_Contact_Counter	HCI_CMD_OPCODE(0x05, 0x0002)
+#define HCI_Read_Link_Quality			HCI_CMD_OPCODE(0x05, 0x0003)
+#define HCI_Read_Local_AMP_Info			HCI_CMD_OPCODE(0x05, 0x0009)
+#define HCI_Read_Local_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000A)
+#define HCI_Write_Remote_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000B)
+#define HCI_Create_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0035)
+#define HCI_Accept_Physical_Link_Request	HCI_CMD_OPCODE(0x01, 0x0036)
+#define HCI_Disconnect_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0037)
+#define HCI_Create_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0038)
+#define HCI_Accept_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0039)
+#define HCI_Disconnect_Logical_Link		HCI_CMD_OPCODE(0x01, 0x003A)
+#define HCI_Logical_Link_Cancel			HCI_CMD_OPCODE(0x01, 0x003B)
+#define HCI_Flow_Spec_Modify			HCI_CMD_OPCODE(0x01, 0x003C)
+#define HCI_Write_Flow_Control_Mode		HCI_CMD_OPCODE(0x01, 0x0067)
+#define HCI_Read_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x0069)
+#define HCI_Write_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x006A)
+#define HCI_Short_Range_Mode			HCI_CMD_OPCODE(0x01, 0x006B)
+#define HCI_Reset				HCI_CMD_OPCODE(0x03, 0x0003)
+#define HCI_Read_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0015)
+#define HCI_Write_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0016)
+#define HCI_Read_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0036)
+#define HCI_Write_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0037)
+#define HCI_Enhanced_Flush			HCI_CMD_OPCODE(0x03, 0x005F)
+#define HCI_Read_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0061)
+#define HCI_Write_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0062)
+#define HCI_Set_Event_Mask_Page_2		HCI_CMD_OPCODE(0x03, 0x0063)
+#define HCI_Read_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0064)
+#define HCI_Write_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0065)
+#define HCI_Read_Local_Version_Info		HCI_CMD_OPCODE(0x04, 0x0001)
+#define HCI_Read_Local_Supported_Commands	HCI_CMD_OPCODE(0x04, 0x0002)
+#define HCI_Read_Buffer_Size			HCI_CMD_OPCODE(0x04, 0x0005)
+#define HCI_Read_Data_Block_Size		HCI_CMD_OPCODE(0x04, 0x000A)
+
+/* AMP HCI command parameters */
+typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];			/* length so far */
+	uint8 max_remote[2];
+} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];
+	uint8 len[2];
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms {
+	uint8 plh;
+	uint8 key_length;
+	uint8 key_type;
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms {
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms {
+	uint8 plh;
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec {
+	uint8 id;
+	uint8 service_type;
+	uint8 max_sdu[2];
+	uint8 sdu_ia_time[4];
+	uint8 access_latency[4];
+	uint8 flush_timeout[4];
+} BWL_POST_PACKED_STRUCT ext_flow_spec_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms {
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms {
+	uint8 llh[2];
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct plh_pad {
+	uint8 plh;
+	uint8 pad;
+} BWL_POST_PACKED_STRUCT plh_pad_t;
+
+typedef BWL_PRE_PACKED_STRUCT union hci_handle {
+	uint16 bredr;
+	plh_pad_t amp;
+} BWL_POST_PACKED_STRUCT hci_handle_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms {
+	hci_handle_t handle;
+	uint8 timeout[2];
+} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms {
+	uint8 llh[2];
+	uint8 befto[4];
+} BWL_POST_PACKED_STRUCT befto_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms {
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms {
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms {
+	uint8 llh[2];
+	uint8 packet_type;
+} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t;
+
+/* Generic AMP extended flow spec service types */
+#define EFS_SVCTYPE_NO_TRAFFIC		0
+#define EFS_SVCTYPE_BEST_EFFORT		1
+#define EFS_SVCTYPE_GUARANTEED		2
+
+/* AMP HCI event packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event {
+	uint8 ecode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_event_t;
+
+#define HCI_EVT_PREAMBLE_SIZE			OFFSETOF(amp_hci_event_t, parms)
+
+/* AMP HCI event codes */
+#define HCI_Command_Complete			0x0E
+#define HCI_Command_Status			0x0F
+#define HCI_Flush_Occurred			0x11
+#define HCI_Enhanced_Flush_Complete		0x39
+#define HCI_Physical_Link_Complete		0x40
+#define HCI_Channel_Select			0x41
+#define HCI_Disconnect_Physical_Link_Complete	0x42
+#define HCI_Logical_Link_Complete		0x45
+#define HCI_Disconnect_Logical_Link_Complete	0x46
+#define HCI_Flow_Spec_Modify_Complete		0x47
+#define HCI_Number_of_Completed_Data_Blocks	0x48
+#define HCI_Short_Range_Mode_Change_Complete	0x4C
+#define HCI_Status_Change_Event			0x4D
+#define HCI_Vendor_Specific			0xFF
+
+/* AMP HCI event mask bit positions */
+#define HCI_Physical_Link_Complete_Event_Mask			0x0001
+#define HCI_Channel_Select_Event_Mask				0x0002
+#define HCI_Disconnect_Physical_Link_Complete_Event_Mask	0x0004
+#define HCI_Logical_Link_Complete_Event_Mask			0x0020
+#define HCI_Disconnect_Logical_Link_Complete_Event_Mask		0x0040
+#define HCI_Flow_Spec_Modify_Complete_Event_Mask		0x0080
+#define HCI_Number_of_Completed_Data_Blocks_Event_Mask		0x0100
+#define HCI_Short_Range_Mode_Change_Complete_Event_Mask		0x1000
+#define HCI_Status_Change_Event_Mask				0x2000
+#define HCI_All_Event_Mask					0x31e7
+
+/* AMP HCI event parameters */
+typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms {
+	uint8 status;
+	uint8 cmdpkts;
+	uint16 opcode;
+} BWL_POST_PACKED_STRUCT cmd_status_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms {
+	uint8 cmdpkts;
+	uint16 opcode;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT cmd_complete_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint16 len;
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT read_local_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms {
+	uint8 status;
+	uint8 AMP_status;
+	uint32 bandwidth;
+	uint32 gbandwidth;
+	uint32 latency;
+	uint32 PDU_size;
+	uint8 ctrl_type;
+	uint16 PAL_cap;
+	uint16 AMP_ASSOC_len;
+	uint32 max_flush_timeout;
+	uint32 be_flush_timeout;
+} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms {
+	uint8 status;
+	uint16 llh;
+} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms {
+	uint8 status;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms {
+	uint8 status;
+	uint16 ACL_pkt_len;
+	uint16 data_block_len;
+	uint16 data_block_num;
+} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct data_blocks {
+	uint16 handle;
+	uint16 pkts;
+	uint16 blocks;
+} BWL_POST_PACKED_STRUCT data_blocks_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms {
+	uint16 num_blocks;
+	uint8 num_handles;
+	data_blocks_t completed[1];
+} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms {
+	uint8 status;
+	uint32 befto;
+} BWL_POST_PACKED_STRUCT befto_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+	uint16 counter;
+} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint8 link_quality;
+} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms {
+	uint8 status;
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms {
+	uint8 len;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms {
+	uint8 status;
+	uint8 hci_version;
+	uint16 hci_revision;
+	uint8 pal_version;
+	uint16 mfg_name;
+	uint16 pal_subversion;
+} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t;
+
+#define MAX_SUPPORTED_CMD_BYTE	64
+typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms {
+	uint8 status;
+	uint8 cmd[MAX_SUPPORTED_CMD_BYTE];
+} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms {
+	uint8 status;
+	uint8 amp_status;
+} BWL_POST_PACKED_STRUCT status_change_evt_parms_t;
+
+/* AMP HCI error codes */
+#define HCI_SUCCESS				0x00
+#define HCI_ERR_ILLEGAL_COMMAND			0x01
+#define HCI_ERR_NO_CONNECTION			0x02
+#define HCI_ERR_MEMORY_FULL			0x07
+#define HCI_ERR_CONNECTION_TIMEOUT		0x08
+#define HCI_ERR_MAX_NUM_OF_CONNECTIONS		0x09
+#define HCI_ERR_CONNECTION_EXISTS		0x0B
+#define HCI_ERR_CONNECTION_DISALLOWED		0x0C
+#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT	0x10
+#define HCI_ERR_UNSUPPORTED_VALUE		0x11
+#define HCI_ERR_ILLEGAL_PARAMETER_FMT		0x12
+#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST		0x16
+#define HCI_ERR_UNSPECIFIED			0x1F
+#define HCI_ERR_UNIT_KEY_USED			0x26
+#define HCI_ERR_QOS_REJECTED			0x2D
+#define HCI_ERR_PARAM_OUT_OF_RANGE		0x30
+#define HCI_ERR_NO_SUITABLE_CHANNEL		0x39
+#define HCI_ERR_CHANNEL_MOVE			0xFF
+
+/* AMP HCI ACL Data packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data {
+	uint16	handle;			/* 12-bit connection handle + 2-bit PB and 2-bit BC flags */
+	uint16	dlen;			/* data total length */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t;
+
+#define HCI_ACL_DATA_PREAMBLE_SIZE	OFFSETOF(amp_hci_ACL_data_t, data)
+
+#define HCI_ACL_DATA_BC_FLAGS		(0x0 << 14)
+#define HCI_ACL_DATA_PB_FLAGS		(0x3 << 12)
+
+#define HCI_ACL_DATA_HANDLE(handle)	((handle) & 0x0fff)
+#define HCI_ACL_DATA_FLAGS(handle)	((handle) >> 12)
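+
+/*
+ * Usage sketch (illustrative; byte-order conversion omitted, and buf is
+ * assumed to point at the start of an ACL packet): splitting the first
+ * 16-bit word into its connection handle and PB/BC flag bits.
+ *
+ *	amp_hci_ACL_data_t *acl = (amp_hci_ACL_data_t *)buf;
+ *	uint16 hdl   = HCI_ACL_DATA_HANDLE(acl->handle);
+ *	uint16 flags = HCI_ACL_DATA_FLAGS(acl->handle);
+ */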
+
+/* AMP Activity Report packet formats */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report {
+	uint8	ScheduleKnown;
+	uint8	NumReports;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple {
+	uint32	StartTime;
+	uint32	Duration;
+	uint32	Periodicity;
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t;
+
+#define HCI_AR_SCHEDULE_KNOWN		0x01
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _bt_amp_hci_h */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
new file mode 100644
index 0000000..5781d13
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
@@ -0,0 +1,173 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * $Id: eapol.h,v 9.23.86.1 2010-09-02 18:09:39 Exp $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#include <bcmcrypto/aeskeywrap.h>
+
+/* EAPOL for 802.3/Ethernet */
+typedef struct {
+	struct ether_header eth;	/* 802.3/Ethernet header */
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+	unsigned char body[1];		/* Body (optional) */
+} eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION	2
+#define WPA_EAPOL_VERSION	1
+#define LEAP_EAPOL_VERSION	1
+#define SES_EAPOL_VERSION	1
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN	8
+#define EAPOL_KEY_IV_LEN	16
+#define EAPOL_KEY_SIG_LEN	16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;			/* Key Descriptor Type */
+	unsigned short length;			/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char iv[EAPOL_KEY_IV_LEN];		/* Key IV */
+	unsigned char index;				/* Key Flags & Index */
+	unsigned char signature[EAPOL_KEY_SIG_LEN];	/* Key Signature */
+	unsigned char key[1];				/* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 	44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK	0x80
+#define EAPOL_KEY_BROADCAST	0
+#define EAPOL_KEY_UNICAST	0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK	0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN	8
+#define EAPOL_WPA_KEY_NONCE_LEN		32
+#define EAPOL_WPA_KEY_IV_LEN		16
+#define EAPOL_WPA_KEY_RSC_LEN		8
+#define EAPOL_WPA_KEY_ID_LEN		8
+#define EAPOL_WPA_KEY_MIC_LEN		16
+#define EAPOL_WPA_KEY_DATA_LEN		(EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE		32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;		/* Key Descriptor Type */
+	unsigned short key_info;	/* Key Information (unaligned) */
+	unsigned short key_len;		/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN];	/* Nonce */
+	unsigned char iv[EAPOL_WPA_KEY_IV_LEN];		/* Key IV */
+	unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN];	/* Key RSC */
+	unsigned char id[EAPOL_WPA_KEY_ID_LEN];		/* WPA:Key ID, 802.11i/WPA2: Reserved */
+	unsigned char mic[EAPOL_WPA_KEY_MIC_LEN];	/* Key MIC */
+	unsigned short data_len;			/* Key Data Length */
+	unsigned char data[EAPOL_WPA_KEY_DATA_LEN];	/* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 		95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_V1		0x01
+#define WPA_KEY_DESC_V2		0x02
+#define WPA_KEY_DESC_V3		0x03
+#define WPA_KEY_PAIRWISE	0x08
+#define WPA_KEY_INSTALL		0x40
+#define WPA_KEY_ACK		0x80
+#define WPA_KEY_MIC		0x100
+#define WPA_KEY_SECURE		0x200
+#define WPA_KEY_ERROR		0x400
+#define WPA_KEY_REQ		0x800
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0		0x00
+#define WPA_KEY_INDEX_1		0x10
+#define WPA_KEY_INDEX_2		0x20
+#define WPA_KEY_INDEX_3		0x30
+#define WPA_KEY_INDEX_MASK	0x30
+#define WPA_KEY_INDEX_SHIFT	0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA	0x1000
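+
+/*
+ * Usage sketch (illustrative; assumes key_info is carried big-endian on
+ * the wire, that ntoh16_ua() from bcmendian.h is available, and that body
+ * points at the EAPOL-Key body): message 1 of the 4-way handshake carries
+ * the Pairwise and Ack bits but no MIC bit.
+ *
+ *	eapol_wpa_key_header_t *k = (eapol_wpa_key_header_t *)body;
+ *	uint16 ki = ntoh16_ua(&k->key_info);
+ *	bool is_msg1 = (ki & WPA_KEY_PAIRWISE) && (ki & WPA_KEY_ACK) &&
+ *	               !(ki & WPA_KEY_MIC);
+ */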
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 type;
+	uint8 length;
+	uint8 oui[3];
+	uint8 subtype;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 	6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK	1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY	2
+#define WPA2_KEY_DATA_SUBTYPE_MAC	3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID	4
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	flags;
+	uint8	reserved;
+	uint8	gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 	2
+
+#define WPA2_GTK_INDEX_MASK	0x03
+#define WPA2_GTK_INDEX_SHIFT	0x00
+
+#define WPA2_GTK_TRANSMIT	0x04
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	reserved[2];
+	uint8	mac[ETHER_ADDR_LEN];
+	uint8	stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD	0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
new file mode 100644
index 0000000..6a6dd14
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
@@ -0,0 +1,162 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h,v 9.56 2009-10-15 22:54:58 Exp $
+ */
+
+
+#ifndef _NET_ETHERNET_H_	      
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define	ETHER_ADDR_LEN		6
+
+
+#define	ETHER_TYPE_LEN		2
+
+
+#define	ETHER_CRC_LEN		4
+
+
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+
+#define	ETHER_MIN_LEN		64
+
+
+#define	ETHER_MIN_DATA		46
+
+
+#define	ETHER_MAX_LEN		1518
+
+
+#define	ETHER_MAX_DATA		1500
+
+
+#define ETHER_TYPE_MIN		0x0600		
+#define	ETHER_TYPE_IP		0x0800		
+#define ETHER_TYPE_ARP		0x0806		
+#define ETHER_TYPE_8021Q	0x8100		
+#define	ETHER_TYPE_BRCM		0x886c		
+#define	ETHER_TYPE_802_1X	0x888e		
+#define	ETHER_TYPE_802_1X_PREAUTH 0x88c7	
+#define ETHER_TYPE_WAI		0x88b4		
+
+
+
+#define	ETHER_BRCM_SUBTYPE_LEN	4	
+#define	ETHER_BRCM_CRAM		1	
+
+
+#define ETHER_DEST_OFFSET	(0 * ETHER_ADDR_LEN)	
+#define ETHER_SRC_OFFSET	(1 * ETHER_ADDR_LEN)	
+#define ETHER_TYPE_OFFSET	(2 * ETHER_ADDR_LEN)	
+
+
+#define	ETHER_IS_VALID_LEN(foo)	\
+	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) {		\
+		((uint8 *)ea)[0] = 0x01;			\
+		((uint8 *)ea)[1] = 0x00;			\
+		((uint8 *)ea)[2] = 0x5e;			\
+		((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f;	\
+		((uint8 *)ea)[4] = ((mgrp_ip) >>  8) & 0xff;	\
+		((uint8 *)ea)[5] = ((mgrp_ip) >>  0) & 0xff;	\
+}
+
+#ifndef __INCif_etherh       
+
+BWL_PRE_PACKED_STRUCT struct ether_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct	ether_addr {
+	uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif	
+
+
+#define ETHER_SET_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) 	(((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+
+#define ETHER_SET_UNICAST(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+
+#define	ether_cmp(a, b)	(!(((short*)a)[0] == ((short*)b)[0]) | \
+			 !(((short*)a)[1] == ((short*)b)[1]) | \
+			 !(((short*)a)[2] == ((short*)b)[2]))
+
+
+#define	ether_copy(s, d) { \
+		((short*)d)[0] = ((short*)s)[0]; \
+		((short*)d)[1] = ((short*)s)[1]; \
+		((short*)d)[2] = ((short*)s)[2]; }
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+
+#define ETHER_ISBCAST(ea)	((((uint8 *)(ea))[0] &		\
+	                          ((uint8 *)(ea))[1] &		\
+				  ((uint8 *)(ea))[2] &		\
+				  ((uint8 *)(ea))[3] &		\
+				  ((uint8 *)(ea))[4] &		\
+				  ((uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea)	((((uint8 *)(ea))[0] |		\
+				  ((uint8 *)(ea))[1] |		\
+				  ((uint8 *)(ea))[2] |		\
+				  ((uint8 *)(ea))[3] |		\
+				  ((uint8 *)(ea))[4] |		\
+				  ((uint8 *)(ea))[5]) == 0)
+
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+	struct ether_header t; \
+	t = *(struct ether_header *)(s); \
+	*(struct ether_header *)(d) = t; \
+} while (0)
+
+
+#include <packed_section_end.h>
+
+#endif 
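ETHER_FILL_MCAST_ADDR_FROM_IP applies the RFC 1112 mapping: the low 23 bits of an IPv4 multicast group are placed under the 01:00:5e OUI. A short sketch combining it with the address-class tests above (the function name is illustrative; mgrp_ip is assumed to be a host-order IPv4 group address):

	/*
	 * Illustrative sketch: map an IPv4 multicast group onto its layer-2
	 * address and run the address-class tests defined above.  mgrp_ip is
	 * a host-order IPv4 address such as 0xe00000fb (224.0.0.251).
	 */
	static int ipv4_mcast_to_ether_ok(uint32 mgrp_ip)
	{
		struct ether_addr ea;

		ETHER_FILL_MCAST_ADDR_FROM_IP(&ea, mgrp_ip);

		/* the mapping always yields a group address that is neither
		 * broadcast nor all-zeros
		 */
		return ETHER_ISMULTI(&ea) && !ETHER_ISBCAST(&ea) &&
		       !ETHER_ISNULLADDR(&ea);
	}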
diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
new file mode 100644
index 0000000..4a0c9d1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
@@ -0,0 +1,512 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * $Id: p2p.h,v 9.17.2.4 2010-12-15 21:41:21 Exp $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <proto/802.11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WiFi P2P OUI values */
+#define P2P_OUI         WFA_OUI             /* WiFi P2P OUI */
+#define P2P_VER         WFA_OUI_TYPE_P2P    /* P2P version: 9=WiFi P2P v1.0 */
+
+#define P2P_IE_ID       0xdd            /* P2P IE element ID */
+
+/* WiFi P2P IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+	uint8   id;     /* IE ID: 0xDD */
+	uint8   len;        /* IE length */
+	uint8   OUI[3];     /* WiFi P2P specific OUI: P2P_OUI */
+	uint8   oui_type;   /* Identifies P2P version: P2P_VER */
+	uint8   subelts[1]; /* variable length subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN    6
+
+#define P2P_ATTR_ID_OFF         0
+#define P2P_ATTR_LEN_OFF        1
+#define P2P_ATTR_DATA_OFF       3
+
+#define P2P_ATTR_HDR_LEN        3 /* ID + 2-byte length field spec 1.02 */
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS         0   /* Status */
+#define P2P_SEID_MINOR_RC       1   /* Minor Reason Code */
+#define P2P_SEID_P2P_INFO       2   /* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID         3   /* P2P Device ID */
+#define P2P_SEID_INTENT         4   /* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT        5   /* Configuration Timeout */
+#define P2P_SEID_CHANNEL        6   /* Channel */
+#define P2P_SEID_GRP_BSSID      7   /* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING      8   /* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR     9   /* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY      10  /* P2P Manageability */
+#define P2P_SEID_CHAN_LIST      11  /* Channel List */
+#define P2P_SEID_ABSENCE        12  /* Notice of Absence */
+#define P2P_SEID_DEV_INFO       13  /* Device Info */
+#define P2P_SEID_GROUP_INFO     14  /* Group Info */
+#define P2P_SEID_GROUP_ID       15  /* Group ID */
+#define P2P_SEID_P2P_IF         16  /* P2P Interface */
+#define P2P_SEID_VNDR           221 /* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES   0x1b /* BRCM proprietary subel: L2 Services */
+
+
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_P2P_INFO */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   dev;        /* Device Capability Bitmap */
+	uint8   group;      /* Group Capability Bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS   0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS    0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT    0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN     0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT         0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC       0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER         0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP       0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT         0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS     0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT     0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT    0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION     0x40 /* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_INTENT */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   intent;     /* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_CFG_TIMEOUT */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   go_tmo;     /* GO config timeout in units of 10 ms */
+	uint8   client_tmo; /* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_STATUS */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   status;     /* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS              0
+				/* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL   1
+				/* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP            P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+				/* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS     2
+				/* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED       3
+				/* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS      4
+				/* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM     5
+				/* Failed, unable to accommodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR         6
+				/* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN      7
+				/* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP       8
+				/* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT          9
+				/* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS     10
+				/* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT     11
+				/* Failed, rejected by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+	uint8   eltId;      /* ID: P2P_SEID_XT_TIMING */
+	uint8   len[2];     /* length not including eltId, len fields */
+	uint8   avail[2];   /* availability period */
+	uint8   interval[2];    /* availability interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN 10  /* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_INTINTADDR */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   mac[6];     /* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_CHANNEL */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   band;       /* Regulatory Class (band) */
+	uint8   channel;    /* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+	uint8   band;                       /* Regulatory Class (band) */
+	uint8   num_channels;               /* # of channels in the channel list */
+	uint8   channels[WL_NUMCHANNELS];   /* Channel List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+/* WiFi P2P IE subelement: Channel List */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_CHAN_LIST */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   country[3]; /* Country String */
+	uint8   num_entries;    /* # of channel entries */
+	wifi_p2p_chanlist_entry_t   entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+						/* Channel Entry List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+/* WiFi P2P IE's Device Info subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+	uint8   eltId;          /* SE ID: P2P_SEID_DEV_INFO */
+	uint8   len[2];         /* SE length not including eltId, len fields */
+	uint8   mac[6];         /* P2P Device MAC address */
+	uint16  wps_cfg_meths;      /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8   pri_devtype[8];     /* Primary Device Type */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN    8
+
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+	uint8   len;
+	uint8   devaddr[ETHER_ADDR_LEN];    /* P2P Device Address */
+	uint8   ifaddr[ETHER_ADDR_LEN];     /* P2P Interface Address */
+	uint8   devcap;             /* Device Capability */
+	uint8   cfg_meths[2];           /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8   pridt[P2P_DEV_TYPE_LEN];    /* Primary Device Type */
+	uint8   secdts;             /* Number of Secondary Device Types */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+/* WiFi P2P IE's Device ID subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+	uint8   eltId;
+	uint8   len[2];
+	struct ether_addr   addr;           /* P2P Device MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+/* WiFi P2P IE subelement: P2P Manageability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+	uint8   eltId;      /* SE ID: P2P_SEID_P2P_MGBTY */
+	uint8   len[2];     /* SE length not including eltId, len fields */
+	uint8   mg_bitmap;  /* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG   0x1 /* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+	uint8   eltId;          /* SE ID: P2P_SEID_GROUP_INFO */
+	uint8   len[2];         /* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+	uint8   category;   /* P2P_AF_CATEGORY */
+	uint8   OUI[3];     /* OUI - P2P_OUI */
+	uint8   type;       /* OUI Type - P2P_VER */
+	uint8   subtype;    /* OUI Subtype - P2P_AF_* */
+	uint8   dialog_token;   /* nonzero, identifies req/resp transaction */
+	uint8   elts[1];    /* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY     0x7f
+
+#define P2P_AF_FIXED_LEN    7
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE    0   /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ     1   /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP     2   /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ      3   /* GO Discoverability Request */
+
+
+/* WiFi P2P Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+	uint8   category;   /* P2P_PUB_AF_CATEGORY */
+	uint8   action;     /* P2P_PUB_AF_ACTION */
+	uint8   oui[3];     /* P2P_OUI */
+	uint8   oui_type;   /* OUI type - P2P_VER */
+	uint8   subtype;    /* OUI subtype - P2P_TYPE_* */
+	uint8   dialog_token;   /* nonzero, identifies req/rsp transaction */
+	uint8   elts[1];    /* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN    8
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION   0x09
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ     0   /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP     1   /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF    2   /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ  3   /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP  4   /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ  5   /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP  6   /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ 7   /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP 8   /* Provision Discovery Response */
+
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
+#define P2P_TYPE_MNREQ      P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP      P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF     P2P_PAF_GON_CONF
+
+/* WiFi P2P IE subelement: Notice of Absence */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+	uint8   cnt_type;   /* Count/Type */
+	uint32  duration;   /* Duration */
+	uint32  interval;   /* Interval */
+	uint32  start;      /* Start Time */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+	uint8   eltId;      /* Subelement ID */
+	uint8   len[2];     /* Length */
+	uint8   index;      /* Index */
+	uint8   ops_ctw_parms;  /* CTWindow and OppPS Parameters */
+	wifi_p2p_noa_desc_t desc[1];    /* Notice of Absence Descriptor(s) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN    5
+
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED   0   /* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT     255 /* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED 1   /* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE    2   /* acceptable limits */
+
+/* ctw_ops_parms field values */
+#define P2P_NOA_CTW_MASK    0x7f
+#define P2P_NOA_OPS_MASK    0x80
+#define P2P_NOA_OPS_SHIFT   7
+
+#define P2P_CTW_MIN 10  /* minimum 10TU */
+
+/*
+ * P2P Service Discovery related
+ */
+#define P2PSD_ACTION_CATEGORY       0x04
+				/* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ    0x0a
+				/* Action value for GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP   0x0b
+				/* Action value for GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ    0x0c
+				/* Action value for GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP   0x0d
+				/* Action value for GAS Comeback Response AF */
+#define P2PSD_AD_EID                0x6c
+				/* Advertisement Protocol IE ID */
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00
+				/* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
+#define P2PSD_ADP_PROTO_ID          0x00
+				/* Advertisement Protocol ID. Always 0 for P2P SD */
+#define P2PSD_GAS_OUI               P2P_OUI
+				/* WFA OUI */
+#define P2PSD_GAS_OUI_SUBTYPE       P2P_VER
+				/* OUI Subtype for GAS IE */
+#define P2PSD_GAS_NQP_INFOID        0xDDDD
+				/* NQP Query Info ID: 56797 */
+#define P2PSD_GAS_COMEBACKDEALY     0x00
+				/* Not used in the Native GAS protocol */
+
+/* Service Protocol Type */
+typedef enum p2psd_svc_protype {
+	SVC_RPOTYPE_ALL = 0,
+	SVC_RPOTYPE_BONJOUR = 1,
+	SVC_RPOTYPE_UPNP = 2,
+	SVC_RPOTYPE_WSD = 3,
+	SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+/* Service Discovery response status code */
+typedef enum {
+	P2PSD_RESP_STATUS_SUCCESS = 0,
+	P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+	P2PSD_RESP_STATUS_DATA_NA = 2,
+	P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+/* Advertisement Protocol IE tuple field */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+	uint8   llm_pamebi; /* Query Response Length Limit bit 0-6, set to 0 plus
+				* Pre-Associated Message Exchange BSSID Independent bit 7, set to 0
+				*/
+	uint8   adp_id;     /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+/* Advertisement Protocol IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+	uint8   id;     /* IE ID: 0x6c - 108 */
+	uint8   len;    /* IE length */
+	wifi_p2psd_adp_tpl_t adp_tpl;  /* Advertisement Protocol Tuple field. Only one
+				* tuple is defined for P2P Service Discovery
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+/* NQP Vendor-specific Content */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+	uint8   oui_subtype;    /* OUI Subtype: 0x09 */
+	uint16  svc_updi;       /* Service Update Indicator */
+	uint8   svc_tlvs[1];    /* wifi_p2psd_qreq_tlv_t type for service request,
+				* wifi_p2psd_qresp_tlv_t type for service response
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+/* Service Request TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+	uint16  len;            /* Length: 5 plus size of Query Data */
+	uint8   svc_prot;       /* Service Protocol Type */
+	uint8   svc_tscid;      /* Service Transaction ID */
+	uint8   query_data[1];  /* Query Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+/* Query Request Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+	uint16  info_id;    /* Info ID: 0xDDDD */
+	uint16  len;        /* Length of service request TLV, 5 plus the size of request data */
+	uint8   oui[3];     /* WFA OUI: 0x0050F2 */
+	uint8   qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+	wifi_p2psd_adp_ie_t     adp_ie;     /* Advertisement Protocol IE */
+	uint16                  qreq_len;   /* Query Request Length */
+	uint8   qreq_frm[1];    /* Query Request Frame wifi_p2psd_qreq_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+/* Service Response TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+	uint16  len;                /* Length: 5 plus size of Query Data */
+	uint8   svc_prot;           /* Service Protocol Type */
+	uint8   svc_tscid;          /* Service Transaction ID */
+	uint8   status;             /* Value defined in Table 57 of P2P spec. */
+	uint8   query_data[1];      /* Response Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+/* Query Response Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+	uint16  info_id;    /* Info ID: 0xDDDD */
+	uint16  len;        /* Length of service response TLV, 6 plus the size of resp data */
+	uint8   oui[3];     /* WFA OUI: 0x0050F2 */
+	uint8   qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+	uint16      status;     /* Value defined in Table 7-23 of IEEE P802.11u */
+	uint16  cb_delay;       /* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t adp_ie;     /* Advertisement Protocol IE */
+	uint16      qresp_len;  /* Query Response Length */
+	uint8   qresp_frm[1];   /* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+	uint16  status;         /* Value defined in Table 7-23 of IEEE P802.11u */
+	uint8   fragment_id;    /* Fragmentation ID */
+	uint16  cb_delay;       /* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t adp_ie;     /* Advertisement Protocol IE */
+	uint16  qresp_len;      /* Query Response Length */
+	uint8   qresp_frm[1];   /* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+/* Wi-Fi GAS Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+	uint8   category;       /* 0x04 Public Action Frame */
+	uint8   action;         /* GAS action: P2PSD_ACTION_ID_GAS_* (0x0a..0x0d) */
+	uint8   dialog_token;   /* nonzero, identifies req/rsp transaction */
+	uint8   query_data[1];  /* Query Data. wifi_p2psd_gas_ireq_frame_t
+					 * or wifi_p2psd_gas_iresp_frame_t format
+					 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _P2P_H_ */
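Every subelement above shares the generic attribute layout described by P2P_ATTR_ID_OFF, P2P_ATTR_LEN_OFF and P2P_ATTR_DATA_OFF: a 1-byte ID followed by a 2-byte little-endian length. A sketch of scanning a P2P IE for one attribute, with deliberately minimal bounds handling (the helper name is illustrative):

	/*
	 * Illustrative sketch: scan the subelement list of a P2P IE for a
	 * given attribute ID.  buf points at the first attribute
	 * (wifi_p2p_ie_t.subelts), len is the number of bytes remaining, and
	 * the 2-byte attribute length is little-endian.
	 */
	static const uint8 *p2p_find_attr(const uint8 *buf, int len, uint8 attr_id)
	{
		while (len >= P2P_ATTR_HDR_LEN) {
			uint16 alen = buf[P2P_ATTR_LEN_OFF] |
			              (buf[P2P_ATTR_LEN_OFF + 1] << 8);

			if (len < P2P_ATTR_HDR_LEN + alen)
				break;			/* truncated attribute */
			if (buf[P2P_ATTR_ID_OFF] == attr_id)
				return buf;		/* matching attribute found */

			buf += P2P_ATTR_HDR_LEN + alen;
			len -= P2P_ATTR_HDR_LEN + alen;
		}
		return NULL;
	}

Calling it as p2p_find_attr(ie->subelts, ie->len - 4, P2P_SEID_P2P_INFO) would, for example, locate the P2P Capability attribute; the 4 accounts for the OUI and OUI-type bytes that the IE length field also covers.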
diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
new file mode 100644
index 0000000..7fe4fbc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
@@ -0,0 +1,76 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h,v 9.2.120.1 2010-11-15 17:56:25 Exp $
+ */
+
+#ifndef	_SD_SPI_H
+#define	_SD_SPI_H
+
+#define SPI_START_M		BITFIELD_MASK(1)	/* Bit [31] 	- Start Bit */
+#define SPI_START_S		31
+#define SPI_DIR_M		BITFIELD_MASK(1)	/* Bit [30] 	- Direction */
+#define SPI_DIR_S		30
+#define SPI_CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S		24
+#define SPI_RW_M		BITFIELD_MASK(1)	/* Bit [23] 	- Read=0, Write=1 */
+#define SPI_RW_S		23
+#define SPI_FUNC_M		BITFIELD_MASK(3)	/* Bits [22:20]	- Function Number */
+#define SPI_FUNC_S		20
+#define SPI_RAW_M		BITFIELD_MASK(1)	/* Bit [19] 	- Read After Wr */
+#define SPI_RAW_S		19
+#define SPI_STUFF_M		BITFIELD_MASK(1)	/* Bit [18] 	- Stuff bit */
+#define SPI_STUFF_S		18
+#define SPI_BLKMODE_M		BITFIELD_MASK(1)	/* Bit [19] 	- Blockmode 1=blk */
+#define SPI_BLKMODE_S		19
+#define SPI_OPCODE_M		BITFIELD_MASK(1)	/* Bit [18] 	- OP Code */
+#define SPI_OPCODE_S		18
+#define SPI_ADDR_M		BITFIELD_MASK(17)	/* Bits [17:1] 	- Address */
+#define SPI_ADDR_S		1
+#define SPI_STUFF0_M		BITFIELD_MASK(1)	/* Bit [0] 	- Stuff bit */
+#define SPI_STUFF0_S		0
+
+#define SPI_RSP_START_M		BITFIELD_MASK(1)	/* Bit [7] 	- Start Bit (always 0) */
+#define SPI_RSP_START_S		7
+#define SPI_RSP_PARAM_ERR_M	BITFIELD_MASK(1)	/* Bit [6] 	- Parameter Error */
+#define SPI_RSP_PARAM_ERR_S	6
+#define SPI_RSP_RFU5_M		BITFIELD_MASK(1)	/* Bit [5] 	- RFU (Always 0) */
+#define SPI_RSP_RFU5_S		5
+#define SPI_RSP_FUNC_ERR_M	BITFIELD_MASK(1)	/* Bit [4] 	- Function number error */
+#define SPI_RSP_FUNC_ERR_S	4
+#define SPI_RSP_CRC_ERR_M	BITFIELD_MASK(1)	/* Bit [3] 	- COM CRC Error */
+#define SPI_RSP_CRC_ERR_S	3
+#define SPI_RSP_ILL_CMD_M	BITFIELD_MASK(1)	/* Bit [2] 	- Illegal Command error */
+#define SPI_RSP_ILL_CMD_S	2
+#define SPI_RSP_RFU1_M		BITFIELD_MASK(1)	/* Bit [1] 	- RFU (Always 0) */
+#define SPI_RSP_RFU1_S		1
+#define SPI_RSP_IDLE_M		BITFIELD_MASK(1)	/* Bit [0] 	- In idle state */
+#define SPI_RSP_IDLE_S		0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN	6	/* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK	0xFE	/* SD Start Block Token */
+#define SDSPI_IDLE_PAD		0xFF	/* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK	0x80
+
+#endif /* _SD_SPI_H */
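The *_M/*_S pairs above describe where each field of the SD-SPI command token sits. BITFIELD_MASK itself comes from a separate Broadcom utility header, so the sketch below carries a local stand-in with the assumed expansion; the helper name is illustrative:

	/* Stand-in for the Broadcom utility macro used by the *_M definitions
	 * above; assumed to expand to a right-justified mask of 'width' bits.
	 */
	#ifndef BITFIELD_MASK
	#define BITFIELD_MASK(width)	((1U << (width)) - 1)
	#endif

	/*
	 * Illustrative sketch: assemble the first 32 bits of the 6-byte
	 * (SDSPI_COMMAND_LEN) SD-SPI command token from its fields.  The
	 * remaining 16 bits of the token (count byte and CRC) are appended
	 * by the caller.
	 */
	static uint32 sdspi_cmd_word(uint32 cmd, uint32 rw, uint32 func, uint32 addr)
	{
		uint32 w = 0;

		w |= 1U << SPI_START_S;				/* start bit */
		w |= 1U << SPI_DIR_S;				/* host to card */
		w |= (cmd  & SPI_CMD_INDEX_M) << SPI_CMD_INDEX_S;
		w |= (rw   & SPI_RW_M)        << SPI_RW_S;
		w |= (func & SPI_FUNC_M)      << SPI_FUNC_S;
		w |= (addr & SPI_ADDR_M)      << SPI_ADDR_S;
		return w;
	}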
diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
new file mode 100644
index 0000000..07fa7e4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
@@ -0,0 +1,70 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h,v 9.7 2009-03-13 01:11:50 Exp $
+ */
+
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define VLAN_VID_MASK		0xfff	
+#define	VLAN_CFI_SHIFT		12	
+#define VLAN_PRI_SHIFT		13	
+
+#define VLAN_PRI_MASK		7	
+
+#define	VLAN_TAG_LEN		4
+#define	VLAN_TAG_OFFSET		(2 * ETHER_ADDR_LEN)	
+
+#define VLAN_TPID		0x8100	
+
+struct ethervlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	vlan_type;		
+	uint16	vlan_tag;		
+	uint16	ether_type;
+};
+
+#define	ETHERVLAN_HDR_LEN	(ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+	struct ethervlan_header t; \
+	t = *(struct ethervlan_header *)(s); \
+	*(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif 
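A sketch of splitting the 802.1Q tag control field with the masks and shifts above, assuming the 16-bit value has already been converted from network byte order (the helper name is illustrative):

	/*
	 * Illustrative sketch: split an 802.1Q tag control field into its
	 * priority, CFI and VLAN ID parts.  tag is the value from
	 * ethervlan_header.vlan_tag after conversion to host byte order.
	 */
	static void vlan_tag_fields(uint16 tag, uint8 *prio, uint8 *cfi, uint16 *vid)
	{
		*prio = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
		*cfi  = (tag >> VLAN_CFI_SHIFT) & 1;
		*vid  = tag & VLAN_VID_MASK;
	}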
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
new file mode 100644
index 0000000..1ff06dc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
@@ -0,0 +1,160 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h,v 1.19 2009-07-13 08:29:58 Exp $
+ */
+
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+
+#include <packed_section_start.h>
+
+
+
+
+#define DOT11_RC_INVALID_WPA_IE		13	
+#define DOT11_RC_MIC_FAILURE		14	
+#define DOT11_RC_4WH_TIMEOUT		15	
+#define DOT11_RC_GTK_UPDATE_TIMEOUT	16	
+#define DOT11_RC_WPA_IE_MISMATCH	17	
+#define DOT11_RC_INVALID_MC_CIPHER	18	
+#define DOT11_RC_INVALID_UC_CIPHER	19	
+#define DOT11_RC_INVALID_AKMP		20	
+#define DOT11_RC_BAD_WPA_VERSION	21	
+#define DOT11_RC_INVALID_WPA_CAP	22	
+#define DOT11_RC_8021X_AUTH_FAIL	23	
+
+#define WPA2_PMKID_LEN	16
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 tag;	
+	uint8 length;	
+	uint8 oui[3];	
+	uint8 oui_type;	
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN	4
+#define WPA_IE_FIXED_LEN	8
+#define WPA_IE_TAG_FIXED_LEN	6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 tag;	
+	uint8 length;	
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN	4
+#define WPA_RSN_IE_TAG_FIXED_LEN	2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 oui[3];
+	uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN	4
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN	2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
+
+
+#define WPA_CIPHER_NONE		0	
+#define WPA_CIPHER_WEP_40	1	
+#define WPA_CIPHER_TKIP		2	
+#define WPA_CIPHER_AES_OCB	3	
+#define WPA_CIPHER_AES_CCM	4	
+#define WPA_CIPHER_WEP_104	5	
+
+
+#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
+				 (cipher) == WPA_CIPHER_WEP_40 || \
+				 (cipher) == WPA_CIPHER_WEP_104 || \
+				 (cipher) == WPA_CIPHER_TKIP || \
+				 (cipher) == WPA_CIPHER_AES_OCB || \
+				 (cipher) == WPA_CIPHER_AES_CCM)
+
+
+#define WPA_TKIP_CM_DETECT	60	
+#define WPA_TKIP_CM_BLOCK	60	
+
+
+#define RSN_CAP_LEN		2	
+
+
+#define RSN_CAP_PREAUTH			0x0001
+#define RSN_CAP_NOPAIRWISE		0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
+#define RSN_CAP_1_REPLAY_CNTR		0
+#define RSN_CAP_2_REPLAY_CNTRS		1
+#define RSN_CAP_4_REPLAY_CNTRS		2
+#define RSN_CAP_16_REPLAY_CNTRS		3
+
+
+#define WPA_CAP_4_REPLAY_CNTRS		RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS		RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+
+#define WPA_CAP_LEN	RSN_CAP_LEN	
+
+#define	WPA_CAP_WPA2_PREAUTH		RSN_CAP_PREAUTH
+
+
+
+#include <packed_section_end.h>
+
+#endif 
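The RSN_CAP_* values describe the 2-byte RSN Capabilities field; the PTK replay-counter field, for instance, selects how many replay counters the peer supports. A sketch of decoding it (the helper name is illustrative), usable alongside IS_WPA_CIPHER when validating an RSN IE:

	/*
	 * Illustrative sketch: decode the PTK replay-counter field of the RSN
	 * Capabilities word.  rsn_cap is the 16-bit capability field in host
	 * byte order; the 2-bit field encodes 1, 2, 4 or 16 counters.
	 */
	static int rsn_ptk_replay_counters(uint16 rsn_cap)
	{
		switch ((rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK) >>
		        RSN_CAP_PTK_REPLAY_CNTR_SHIFT) {
		case RSN_CAP_1_REPLAY_CNTR:	return 1;
		case RSN_CAP_2_REPLAY_CNTRS:	return 2;
		case RSN_CAP_4_REPLAY_CNTRS:	return 4;
		case RSN_CAP_16_REPLAY_CNTRS:	return 16;
		}
		return 1;	/* not reachable: the field is only two bits wide */
	}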
diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h
new file mode 100644
index 0000000..cbd3749
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -0,0 +1,1615 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h,v 13.169.2.14 2011-02-10 23:43:55 Exp $
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+
+#ifndef	_SBCHIPC_H
+#define	_SBCHIPC_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	
+
+typedef volatile struct {
+	uint32	chipid;			
+	uint32	capabilities;
+	uint32	corecontrol;		
+	uint32	bist;
+
+	
+	uint32	otpstatus;		
+	uint32	otpcontrol;
+	uint32	otpprog;
+	uint32	otplayout;		
+
+	
+	uint32	intstatus;		
+	uint32	intmask;
+
+	
+	uint32	chipcontrol;		
+	uint32	chipstatus;		
+
+	
+	uint32	jtagcmd;		
+	uint32	jtagir;
+	uint32	jtagdr;
+	uint32	jtagctrl;
+
+	
+	uint32	flashcontrol;		
+	uint32	flashaddress;
+	uint32	flashdata;
+	uint32	PAD[1];
+
+	
+	uint32	broadcastaddress;	
+	uint32	broadcastdata;
+
+	
+	uint32	gpiopullup;		
+	uint32	gpiopulldown;		
+	uint32	gpioin;			
+	uint32	gpioout;		
+	uint32	gpioouten;		
+	uint32	gpiocontrol;		
+	uint32	gpiointpolarity;	
+	uint32	gpiointmask;		
+
+	
+	uint32	gpioevent;
+	uint32	gpioeventintmask;
+
+	
+	uint32	watchdog;		
+
+	
+	uint32	gpioeventintpolarity;
+
+	
+	uint32  gpiotimerval;		
+	uint32  gpiotimeroutmask;
+
+	
+	uint32	clockcontrol_n;		
+	uint32	clockcontrol_sb;	
+	uint32	clockcontrol_pci;	
+	uint32	clockcontrol_m2;	
+	uint32	clockcontrol_m3;	
+	uint32	clkdiv;			
+	uint32	gpiodebugsel;		
+	uint32	capabilities_ext;               	
+
+	
+	uint32	pll_on_delay;		
+	uint32	fref_sel_delay;
+	uint32	slow_clk_ctl;		
+	uint32	PAD;
+
+	
+	uint32	system_clk_ctl;		
+	uint32	clkstatestretch;
+	uint32	PAD[2];
+
+	
+	uint32	bp_addrlow;		
+	uint32	bp_addrhigh;
+	uint32	bp_data;
+	uint32	PAD;
+	uint32	bp_indaccess;
+	
+	uint32	gsioctrl;
+	uint32	gsioaddress;
+	uint32	gsiodata;
+
+	
+	uint32	clkdiv2;
+	uint32	PAD[2];
+
+	
+	uint32	eromptr;		
+
+	
+	uint32	pcmcia_config;		
+	uint32	pcmcia_memwait;
+	uint32	pcmcia_attrwait;
+	uint32	pcmcia_iowait;
+	uint32	ide_config;
+	uint32	ide_memwait;
+	uint32	ide_attrwait;
+	uint32	ide_iowait;
+	uint32	prog_config;
+	uint32	prog_waitcount;
+	uint32	flash_config;
+	uint32	flash_waitcount;
+	uint32	PAD[4];
+	uint32	PAD[40];
+
+	
+	
+	uint32	clk_ctl_st;		
+	uint32	hw_war;
+	uint32	PAD[70];
+
+	
+	uint8	uart0data;		
+	uint8	uart0imr;
+	uint8	uart0fcr;
+	uint8	uart0lcr;
+	uint8	uart0mcr;
+	uint8	uart0lsr;
+	uint8	uart0msr;
+	uint8	uart0scratch;
+	uint8	PAD[248];		
+
+	uint8	uart1data;		
+	uint8	uart1imr;
+	uint8	uart1fcr;
+	uint8	uart1lcr;
+	uint8	uart1mcr;
+	uint8	uart1lsr;
+	uint8	uart1msr;
+	uint8	uart1scratch;
+	uint32	PAD[126];
+
+	
+	
+	uint32	pmucontrol;		
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		
+	uint32	gpioenable;		
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	PAD;
+	uint32	chipcontrol_addr;	
+	uint32	chipcontrol_data;	
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		
+	uint32	pmu_xtalfreq;		
+	uint32	PAD[100];
+	uint16	sromotp[768];
+} chipcregs_t;
+
+#endif 
+
+
+#define	CC_CHIPID		0
+#define	CC_CAPABILITIES		4
+#define	CC_CHIPST		0x2c
+#define	CC_EROMPTR		0xfc
+
+
+#define CC_OTPST		0x10
+#define	CC_JTAGCMD		0x30
+#define	CC_JTAGIR		0x34
+#define	CC_JTAGDR		0x38
+#define	CC_JTAGCTRL		0x3c
+#define	CC_GPIOPU		0x58
+#define	CC_GPIOPD		0x5c
+#define	CC_GPIOIN		0x60
+#define	CC_GPIOOUT		0x64
+#define	CC_GPIOOUTEN		0x68
+#define	CC_GPIOCTRL		0x6c
+#define	CC_GPIOPOL		0x70
+#define	CC_GPIOINTM		0x74
+#define	CC_WATCHDOG		0x80
+#define	CC_CLKC_N		0x90
+#define	CC_CLKC_M0		0x94
+#define	CC_CLKC_M1		0x98
+#define	CC_CLKC_M2		0x9c
+#define	CC_CLKC_M3		0xa0
+#define	CC_CLKDIV		0xa4
+#define	CC_SYS_CLK_CTL		0xc0
+#define	CC_CLK_CTL_ST		SI_CLK_CTL_ST
+#define	PMU_CTL			0x600
+#define	PMU_CAP			0x604
+#define	PMU_ST			0x608
+#define PMU_RES_STATE		0x60c
+#define PMU_TIMER		0x614
+#define	PMU_MIN_RES_MASK	0x618
+#define	PMU_MAX_RES_MASK	0x61c
+#define CC_CHIPCTL_ADDR         0x650
+#define CC_CHIPCTL_DATA         0x654
+#define PMU_REG_CONTROL_ADDR	0x658
+#define PMU_REG_CONTROL_DATA	0x65C
+#define PMU_PLL_CONTROL_ADDR 	0x660
+#define PMU_PLL_CONTROL_DATA 	0x664
+#define	CC_SROM_OTP		0x800		
+
+
+#define	CID_ID_MASK		0x0000ffff	
+#define	CID_REV_MASK		0x000f0000	
+#define	CID_REV_SHIFT		16		
+#define	CID_PKG_MASK		0x00f00000	
+#define	CID_PKG_SHIFT		20		
+#define	CID_CC_MASK		0x0f000000	
+#define CID_CC_SHIFT		24
+#define	CID_TYPE_MASK		0xf0000000	
+#define CID_TYPE_SHIFT		28
+
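The CID_* masks decode the chipid register at the start of chipcregs_t, which packs the chip number, revision, package option, core count and bus type. A sketch of extracting the commonly used fields (the helper name is illustrative):

	/*
	 * Illustrative sketch: decode the chipcregs_t.chipid register using
	 * the CID_* masks above.
	 */
	static void cc_decode_chipid(uint32 chipid, uint32 *chip, uint32 *rev,
	                             uint32 *pkg)
	{
		*chip = chipid & CID_ID_MASK;
		*rev  = (chipid & CID_REV_MASK) >> CID_REV_SHIFT;
		*pkg  = (chipid & CID_PKG_MASK) >> CID_PKG_SHIFT;
	}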
+
+#define	CC_CAP_UARTS_MASK	0x00000003	
+#define CC_CAP_MIPSEB		0x00000004	
+#define CC_CAP_UCLKSEL		0x00000018	
+#define CC_CAP_UINTCLK		0x00000008	
+#define CC_CAP_UARTGPIO		0x00000020	
+#define CC_CAP_EXTBUS_MASK	0x000000c0	
+#define CC_CAP_EXTBUS_NONE	0x00000000	
+#define CC_CAP_EXTBUS_FULL	0x00000040	
+#define CC_CAP_EXTBUS_PROG	0x00000080	
+#define	CC_CAP_FLASH_MASK	0x00000700	
+#define	CC_CAP_PLL_MASK		0x00038000	
+#define CC_CAP_PWR_CTL		0x00040000	
+#define CC_CAP_OTPSIZE		0x00380000	
+#define CC_CAP_OTPSIZE_SHIFT	19		
+#define CC_CAP_OTPSIZE_BASE	5		
+#define CC_CAP_JTAGP		0x00400000	
+#define CC_CAP_ROM		0x00800000	
+#define CC_CAP_BKPLN64		0x08000000	
+#define	CC_CAP_PMU		0x10000000	
+#define	CC_CAP_ECI		0x20000000	
+#define	CC_CAP_SROM		0x40000000	
+#define	CC_CAP_NFLASH		0x80000000	
+
+#define	CC_CAP2_SECI		0x00000001	
+#define	CC_CAP2_GSIO		0x00000002	
+
+
+#define CC_CAP_EXT_SECI_PRESENT   0x00000001    
+
+
+#define PLL_NONE		0x00000000
+#define PLL_TYPE1		0x00010000	
+#define PLL_TYPE2		0x00020000	
+#define PLL_TYPE3		0x00030000	
+#define PLL_TYPE4		0x00008000	
+#define PLL_TYPE5		0x00018000	
+#define PLL_TYPE6		0x00028000	
+#define PLL_TYPE7		0x00038000	
+
+
+#define	ILP_CLOCK		32000
+
+
+#define	ALP_CLOCK		20000000
+
+
+#define	HT_CLOCK		80000000
+
+
+#define CC_UARTCLKO		0x00000001	
+#define	CC_SE			0x00000002	
+#define CC_ASYNCGPIO	0x00000004	
+#define CC_UARTCLKEN		0x00000008	
+
+
+#define CHIPCTRL_4321A0_DEFAULT	0x3a4
+#define CHIPCTRL_4321A1_DEFAULT	0x0a4
+#define CHIPCTRL_4321_PLL_DOWN	0x800000	
+
+
+#define OTPS_OL_MASK		0x000000ff
+#define OTPS_OL_MFG		0x00000001	
+#define OTPS_OL_OR1		0x00000002	
+#define OTPS_OL_OR2		0x00000004	
+#define OTPS_OL_GU		0x00000008	
+#define OTPS_GUP_MASK		0x00000f00
+#define OTPS_GUP_SHIFT		8
+#define OTPS_GUP_HW		0x00000100	
+#define OTPS_GUP_SW		0x00000200	
+#define OTPS_GUP_CI		0x00000400	
+#define OTPS_GUP_FUSE		0x00000800	
+#define OTPS_READY		0x00001000
+#define OTPS_RV(x)		(1 << (16 + (x)))	
+#define OTPS_RV_MASK		0x0fff0000
+
+
+#define OTPC_PROGSEL		0x00000001
+#define OTPC_PCOUNT_MASK	0x0000000e
+#define OTPC_PCOUNT_SHIFT	1
+#define OTPC_VSEL_MASK		0x000000f0
+#define OTPC_VSEL_SHIFT		4
+#define OTPC_TMM_MASK		0x00000700
+#define OTPC_TMM_SHIFT		8
+#define OTPC_ODM		0x00000800
+#define OTPC_PROGEN		0x80000000
+
+
+#define OTPP_COL_MASK		0x000000ff
+#define OTPP_COL_SHIFT		0
+#define OTPP_ROW_MASK		0x0000ff00
+#define OTPP_ROW_SHIFT		8
+#define OTPP_OC_MASK		0x0f000000
+#define OTPP_OC_SHIFT		24
+#define OTPP_READERR		0x10000000
+#define OTPP_VALUE_MASK		0x20000000
+#define OTPP_VALUE_SHIFT	29
+#define OTPP_START_BUSY		0x80000000
+#define	OTPP_READ		0x40000000	
+
+
+#define OTP_CISFORMAT_NEW	0x80000000
+
+
+#define OTPPOC_READ		0
+#define OTPPOC_BIT_PROG		1
+#define OTPPOC_VERIFY		3
+#define OTPPOC_INIT		4
+#define OTPPOC_SET		5
+#define OTPPOC_RESET		6
+#define OTPPOC_OCST		7
+#define OTPPOC_ROW_LOCK		8
+#define OTPPOC_PRESCN_TEST	9
+
+
+
+#define	JTAGM_CREV_OLD		10	
+#define	JTAGM_CREV_IRP		22	
+#define	JTAGM_CREV_RTI		28	
+
+
+#define JCMD_START		0x80000000
+#define JCMD_BUSY		0x80000000
+#define JCMD_STATE_MASK		0x60000000
+#define JCMD_STATE_TLR		0x00000000	
+#define JCMD_STATE_PIR		0x20000000	
+#define JCMD_STATE_PDR		0x40000000	
+#define JCMD_STATE_RTI		0x60000000	
+#define JCMD0_ACC_MASK		0x0000f000
+#define JCMD0_ACC_IRDR		0x00000000
+#define JCMD0_ACC_DR		0x00001000
+#define JCMD0_ACC_IR		0x00002000
+#define JCMD0_ACC_RESET		0x00003000
+#define JCMD0_ACC_IRPDR		0x00004000
+#define JCMD0_ACC_PDR		0x00005000
+#define JCMD0_IRW_MASK		0x00000f00
+#define JCMD_ACC_MASK		0x000f0000	
+#define JCMD_ACC_IRDR		0x00000000
+#define JCMD_ACC_DR		0x00010000
+#define JCMD_ACC_IR		0x00020000
+#define JCMD_ACC_RESET		0x00030000
+#define JCMD_ACC_IRPDR		0x00040000
+#define JCMD_ACC_PDR		0x00050000
+#define JCMD_ACC_PIR		0x00060000
+#define JCMD_ACC_IRDR_I		0x00070000	
+#define JCMD_ACC_DR_I		0x00080000	
+#define JCMD_IRW_MASK		0x00001f00
+#define JCMD_IRW_SHIFT		8
+#define JCMD_DRW_MASK		0x0000003f
+
+
+#define JCTRL_FORCE_CLK		4		
+#define JCTRL_EXT_EN		2		
+#define JCTRL_EN		1		
+
+
+#define	CLKD_SFLASH		0x0f000000
+#define	CLKD_SFLASH_SHIFT	24
+#define	CLKD_OTP		0x000f0000
+#define	CLKD_OTP_SHIFT		16
+#define	CLKD_JTAG		0x00000f00
+#define	CLKD_JTAG_SHIFT		8
+#define	CLKD_UART		0x000000ff
+
+#define	CLKD2_SROM		0x00000003
+
+
+#define	CI_GPIO			0x00000001	
+#define	CI_EI			0x00000002	
+#define	CI_TEMP			0x00000004	
+#define	CI_SIRQ			0x00000008	
+#define	CI_ECI			0x00000010	
+#define	CI_PMU			0x00000020	
+#define	CI_UART			0x00000040	
+#define	CI_WDRESET		0x80000000	
+
+
+#define SCC_SS_MASK		0x00000007	
+#define	SCC_SS_LPO		0x00000000	
+#define	SCC_SS_XTAL		0x00000001	
+#define	SCC_SS_PCI		0x00000002	
+#define SCC_LF			0x00000200	
+#define SCC_LP			0x00000400	
+#define SCC_FS			0x00000800	
+#define SCC_IP			0x00001000	
+#define SCC_XC			0x00002000	
+#define SCC_XP			0x00004000	
+#define SCC_CD_MASK		0xffff0000	
+#define SCC_CD_SHIFT		16
+
+
+#define	SYCC_IE			0x00000001	
+#define	SYCC_AE			0x00000002	
+#define	SYCC_FP			0x00000004	
+#define	SYCC_AR			0x00000008	
+#define	SYCC_HR			0x00000010	
+#define SYCC_CD_MASK		0xffff0000	
+#define SYCC_CD_SHIFT		16
+
+
+#define	BPIA_BYTEEN		0x0000000f
+#define	BPIA_SZ1		0x00000001
+#define	BPIA_SZ2		0x00000003
+#define	BPIA_SZ4		0x00000007
+#define	BPIA_SZ8		0x0000000f
+#define	BPIA_WRITE		0x00000100
+#define	BPIA_START		0x00000200
+#define	BPIA_BUSY		0x00000200
+#define	BPIA_ERROR		0x00000400
+
+
+#define	CF_EN			0x00000001	
+#define	CF_EM_MASK		0x0000000e	
+#define	CF_EM_SHIFT		1
+#define	CF_EM_FLASH		0		
+#define	CF_EM_SYNC		2		
+#define	CF_EM_PCMCIA		4		
+#define	CF_DS			0x00000010	
+#define	CF_BS			0x00000020	
+#define	CF_CD_MASK		0x000000c0	
+#define	CF_CD_SHIFT		6
+#define	CF_CD_DIV2		0x00000000	
+#define	CF_CD_DIV3		0x00000040	
+#define	CF_CD_DIV4		0x00000080	
+#define	CF_CE			0x00000100	
+#define	CF_SB			0x00000200	
+
+
+#define	PM_W0_MASK		0x0000003f	
+#define	PM_W1_MASK		0x00001f00	
+#define	PM_W1_SHIFT		8
+#define	PM_W2_MASK		0x001f0000	
+#define	PM_W2_SHIFT		16
+#define	PM_W3_MASK		0x1f000000	
+#define	PM_W3_SHIFT		24
+
+
+#define	PA_W0_MASK		0x0000003f	
+#define	PA_W1_MASK		0x00001f00	
+#define	PA_W1_SHIFT		8
+#define	PA_W2_MASK		0x001f0000	
+#define	PA_W2_SHIFT		16
+#define	PA_W3_MASK		0x1f000000	
+#define	PA_W3_SHIFT		24
+
+
+#define	PI_W0_MASK		0x0000003f	
+#define	PI_W1_MASK		0x00001f00	
+#define	PI_W1_SHIFT		8
+#define	PI_W2_MASK		0x001f0000	
+#define	PI_W2_SHIFT		16
+#define	PI_W3_MASK		0x1f000000	
+#define	PI_W3_SHIFT		24
+
+
+#define	PW_W0_MASK		0x0000001f	
+#define	PW_W1_MASK		0x00001f00	
+#define	PW_W1_SHIFT		8
+#define	PW_W2_MASK		0x001f0000	
+#define	PW_W2_SHIFT		16
+#define	PW_W3_MASK		0x1f000000	
+#define	PW_W3_SHIFT		24
+
+#define PW_W0       		0x0000000c
+#define PW_W1       		0x00000a00
+#define PW_W2       		0x00020000
+#define PW_W3       		0x01000000
+
+
+#define	FW_W0_MASK		0x0000003f	
+#define	FW_W1_MASK		0x00001f00	
+#define	FW_W1_SHIFT		8
+#define	FW_W2_MASK		0x001f0000	
+#define	FW_W2_SHIFT		16
+#define	FW_W3_MASK		0x1f000000	
+#define	FW_W3_SHIFT		24
+
+
+#define	SRC_START		0x80000000
+#define	SRC_BUSY		0x80000000
+#define	SRC_OPCODE		0x60000000
+#define	SRC_OP_READ		0x00000000
+#define	SRC_OP_WRITE		0x20000000
+#define	SRC_OP_WRDIS		0x40000000
+#define	SRC_OP_WREN		0x60000000
+#define	SRC_OTPSEL		0x00000010
+#define	SRC_LOCK		0x00000008
+#define	SRC_SIZE_MASK		0x00000006
+#define	SRC_SIZE_1K		0x00000000
+#define	SRC_SIZE_4K		0x00000002
+#define	SRC_SIZE_16K		0x00000004
+#define	SRC_SIZE_SHIFT		1
+#define	SRC_PRESENT		0x00000001
+
+
+#define	PCTL_ILP_DIV_MASK	0xffff0000
+#define	PCTL_ILP_DIV_SHIFT	16
+#define PCTL_PLL_PLLCTL_UPD	0x00000400	
+#define PCTL_NOILP_ON_WAIT	0x00000200	
+#define	PCTL_HT_REQ_EN		0x00000100
+#define	PCTL_ALP_REQ_EN		0x00000080
+#define	PCTL_XTALFREQ_MASK	0x0000007c
+#define	PCTL_XTALFREQ_SHIFT	2
+#define	PCTL_ILP_DIV_EN		0x00000002
+#define	PCTL_LPO_SEL		0x00000001
+
+
+#define CSTRETCH_HT		0xffff0000
+#define CSTRETCH_ALP		0x0000ffff
+
+
+#define GPIO_ONTIME_SHIFT	16
+
+
+#define	CN_N1_MASK		0x3f		
+#define	CN_N2_MASK		0x3f00		
+#define	CN_N2_SHIFT		8
+#define	CN_PLLC_MASK		0xf0000		
+#define	CN_PLLC_SHIFT		16
+
+
+#define	CC_M1_MASK		0x3f		
+#define	CC_M2_MASK		0x3f00		
+#define	CC_M2_SHIFT		8
+#define	CC_M3_MASK		0x3f0000	
+#define	CC_M3_SHIFT		16
+#define	CC_MC_MASK		0x1f000000	
+#define	CC_MC_SHIFT		24
+
+
+#define	CC_F6_2			0x02		
+#define	CC_F6_3			0x03		
+#define	CC_F6_4			0x05		
+#define	CC_F6_5			0x09
+#define	CC_F6_6			0x11
+#define	CC_F6_7			0x21
+
+#define	CC_F5_BIAS		5		
+
+#define	CC_MC_BYPASS		0x08
+#define	CC_MC_M1		0x04
+#define	CC_MC_M1M2		0x02
+#define	CC_MC_M1M2M3		0x01
+#define	CC_MC_M1M3		0x11
+
+
+#define	CC_T2_BIAS		2		
+#define	CC_T2M2_BIAS		3		
+
+#define	CC_T2MC_M1BYP		1
+#define	CC_T2MC_M2BYP		2
+#define	CC_T2MC_M3BYP		4
+
+
+#define	CC_T6_MMASK		1		
+#define	CC_T6_M0		120000000	
+#define	CC_T6_M1		100000000	
+#define	SB2MIPS_T6(sb)		(2 * (sb))
+
+
+#define	CC_CLOCK_BASE1		24000000	
+#define CC_CLOCK_BASE2		12500000	
+
+
+#define	CLKC_5350_N		0x0311
+#define	CLKC_5350_M		0x04020009
+
+
+#define FLASH_NONE		0x000		
+#define SFLASH_ST		0x100		
+#define SFLASH_AT		0x200		
+#define	PFLASH			0x700		
+
+
+#define	CC_CFG_EN		0x0001		
+#define	CC_CFG_EM_MASK		0x000e		
+#define	CC_CFG_EM_ASYNC		0x0000		
+#define	CC_CFG_EM_SYNC		0x0002		
+#define	CC_CFG_EM_PCMCIA	0x0004		
+#define	CC_CFG_EM_IDE		0x0006		
+#define	CC_CFG_DS		0x0010		
+#define	CC_CFG_CD_MASK		0x00e0		
+#define	CC_CFG_CE		0x0100		
+#define	CC_CFG_SB		0x0200		
+#define	CC_CFG_IS		0x0400		
+
+
+#define	CC_EB_BASE		0x1a000000	
+#define	CC_EB_PCMCIA_MEM	0x1a000000	
+#define	CC_EB_PCMCIA_IO		0x1a200000	
+#define	CC_EB_PCMCIA_CFG	0x1a400000	
+#define	CC_EB_IDE		0x1a800000	
+#define	CC_EB_PCMCIA1_MEM	0x1a800000	
+#define	CC_EB_PCMCIA1_IO	0x1aa00000	
+#define	CC_EB_PCMCIA1_CFG	0x1ac00000	
+#define	CC_EB_PROGIF		0x1b000000	
+
+
+
+#define SFLASH_OPCODE		0x000000ff
+#define SFLASH_ACTION		0x00000700
+#define	SFLASH_CS_ACTIVE	0x00001000	
+#define SFLASH_START		0x80000000
+#define SFLASH_BUSY		SFLASH_START
+
+
+#define	SFLASH_ACT_OPONLY	0x0000		
+#define	SFLASH_ACT_OP1D		0x0100		
+#define	SFLASH_ACT_OP3A		0x0200		
+#define	SFLASH_ACT_OP3A1D	0x0300		
+#define	SFLASH_ACT_OP3A4D	0x0400		
+#define	SFLASH_ACT_OP3A4X4D	0x0500		
+#define	SFLASH_ACT_OP3A1X4D	0x0700		
+
+
+#define SFLASH_ST_WREN		0x0006		
+#define SFLASH_ST_WRDIS		0x0004		
+#define SFLASH_ST_RDSR		0x0105		
+#define SFLASH_ST_WRSR		0x0101		
+#define SFLASH_ST_READ		0x0303		
+#define SFLASH_ST_PP		0x0302		
+#define SFLASH_ST_SE		0x02d8		
+#define SFLASH_ST_BE		0x00c7		
+#define SFLASH_ST_DP		0x00b9		
+#define SFLASH_ST_RES		0x03ab		
+#define SFLASH_ST_CSA		0x1000		
+#define SFLASH_ST_SSE		0x0220		
+
+
+#define SFLASH_ST_WIP		0x01		
+#define SFLASH_ST_WEL		0x02		
+#define SFLASH_ST_BP_MASK	0x1c		
+#define SFLASH_ST_BP_SHIFT	2
+#define SFLASH_ST_SRWD		0x80		
+
+
+#define SFLASH_AT_READ				0x07e8
+#define SFLASH_AT_PAGE_READ			0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS			0x01d7
+#define SFLASH_AT_BUF1_WRITE			0x0384
+#define SFLASH_AT_BUF2_WRITE			0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM		0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM		0x0286
+#define SFLASH_AT_BUF1_PROGRAM			0x0288
+#define SFLASH_AT_BUF2_PROGRAM			0x0289
+#define SFLASH_AT_PAGE_ERASE			0x0281
+#define SFLASH_AT_BLOCK_ERASE			0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM	0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM	0x0385
+#define SFLASH_AT_BUF1_LOAD			0x0253
+#define SFLASH_AT_BUF2_LOAD			0x0255
+#define SFLASH_AT_BUF1_COMPARE			0x0260
+#define SFLASH_AT_BUF2_COMPARE			0x0261
+#define SFLASH_AT_BUF1_REPROGRAM		0x0258
+#define SFLASH_AT_BUF2_REPROGRAM		0x0259
+
+
+#define SFLASH_AT_READY				0x80
+#define SFLASH_AT_MISMATCH			0x40
+#define SFLASH_AT_ID_MASK			0x38
+#define SFLASH_AT_ID_SHIFT			3
+
+
+#define GSIO_START			0x80000000
+#define GSIO_BUSY			GSIO_START
+
+
+
+#define UART_RX		0	
+#define UART_TX		0	
+#define UART_DLL	0	
+#define UART_IER	1	
+#define UART_DLM	1	
+#define UART_IIR	2	
+#define UART_FCR	2	
+#define UART_LCR	3	
+#define UART_MCR	4	
+#define UART_LSR	5	
+#define UART_MSR	6	
+#define UART_SCR	7	
+#define UART_LCR_DLAB	0x80	
+#define UART_LCR_WLEN8	0x03	
+#define UART_MCR_OUT2	0x08	
+#define UART_MCR_LOOP	0x10	
+#define UART_LSR_RX_FIFO 	0x80	
+#define UART_LSR_TDHR		0x40	
+#define UART_LSR_THRE		0x20	
+#define UART_LSR_BREAK		0x10	
+#define UART_LSR_FRAMING	0x08	
+#define UART_LSR_PARITY		0x04	
+#define UART_LSR_OVERRUN	0x02	
+#define UART_LSR_RXRDY		0x01	
+#define UART_FCR_FIFO_ENABLE 1	
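The UART_* offsets and LSR bits describe the 8250-compatible UARTs exposed as uart0data..uart0scratch in chipcregs_t. A minimal polled-transmit sketch against the first UART (illustrative only; real code would go through the OSL register accessors and bound the wait):

	/*
	 * Illustrative sketch: polled transmit on the first chipcommon UART.
	 * cc is a mapped register block (chipcregs_t is declared volatile);
	 * real code would use the OSL register accessors and a timeout.
	 */
	static void cc_uart0_putc(chipcregs_t *cc, uint8 c)
	{
		while (!(cc->uart0lsr & UART_LSR_THRE))
			;	/* wait until the TX holding register is empty */
		cc->uart0data = c;
	}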
+
+
+#define UART_IIR_FIFO_MASK	0xc0	
+#define UART_IIR_INT_MASK	0xf	
+#define UART_IIR_MDM_CHG	0x0	
+#define UART_IIR_NOINT		0x1	
+#define UART_IIR_THRE		0x2	
+#define UART_IIR_RCVD_DATA	0x4	
+#define UART_IIR_RCVR_STATUS 	0x6	
+#define UART_IIR_CHAR_TIME 	0xc	
+
+
+#define UART_IER_EDSSI	8	
+#define UART_IER_ELSI	4	
+#define UART_IER_ETBEI  2	
+#define UART_IER_ERBFI	1	
+
+
+#define PST_EXTLPOAVAIL	0x0100
+#define PST_WDRESET	0x0080
+#define	PST_INTPEND	0x0040
+#define	PST_SBCLKST	0x0030
+#define	PST_SBCLKST_ILP	0x0010
+#define	PST_SBCLKST_ALP	0x0020
+#define	PST_SBCLKST_HT	0x0030
+#define	PST_ALPAVAIL	0x0008
+#define	PST_HTAVAIL	0x0004
+#define	PST_RESINIT	0x0003
+
+
+#define PCAP_REV_MASK	0x000000ff
+#define PCAP_RC_MASK	0x00001f00
+#define PCAP_RC_SHIFT	8
+#define PCAP_TC_MASK	0x0001e000
+#define PCAP_TC_SHIFT	13
+#define PCAP_PC_MASK	0x001e0000
+#define PCAP_PC_SHIFT	17
+#define PCAP_VC_MASK	0x01e00000
+#define PCAP_VC_SHIFT	21
+#define PCAP_CC_MASK	0x1e000000
+#define PCAP_CC_SHIFT	25
+#define PCAP5_PC_MASK	0x003e0000	
+#define PCAP5_PC_SHIFT	17
+#define PCAP5_VC_MASK	0x07c00000
+#define PCAP5_VC_SHIFT	22
+#define PCAP5_CC_MASK	0xf8000000
+#define PCAP5_CC_SHIFT	27
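+
+/*
+ * Illustrative sketch, not part of the original header: the *_MASK / *_SHIFT
+ * pairs above follow the usual "(value & MASK) >> SHIFT" pattern.  The helper
+ * name, the raw capabilities argument and the reading of RC as the PMU
+ * resource count are assumptions made only for this example.
+ */
+static inline unsigned int
+pmu_cap_rc_example(unsigned int pmucap)
+{
+	/* extract the RC field from a PMU capabilities word */
+	return (pmucap & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+}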
+
+
+
+#define	PRRT_TIME_MASK	0x03ff
+#define	PRRT_INTEN	0x0400
+#define	PRRT_REQ_ACTIVE	0x0800
+#define	PRRT_ALP_REQ	0x1000
+#define	PRRT_HT_REQ	0x2000
+
+
+#define PMURES_BIT(bit)	(1 << (bit))
+
+
+#define PMURES_MAX_RESNUM	30
+
+
+#define	PMU_CHIPCTL0		0
+
+
+#define PMU_CC1_CLKREQ_TYPE_SHIFT	19
+#define PMU_CC1_CLKREQ_TYPE_MASK	(1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN		0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL		1
+
+
+#define	PMU_CHIPCTL1			1
+#define	PMU_CC1_RXC_DLL_BYPASS		0x00010000
+
+#define PMU_CC1_IF_TYPE_MASK   		0x00000030
+#define PMU_CC1_IF_TYPE_RMII    	0x00000000
+#define PMU_CC1_IF_TYPE_MII     	0x00000010
+#define PMU_CC1_IF_TYPE_RGMII   	0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK    	0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY    	0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 	0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII	0x00000080
+#define PMU_CC1_SW_TYPE_RGMII   	0x000000c0
+
+
+
+
+
+#define	PMU0_PLL0_PLLCTL0		0
+#define	PMU0_PLL0_PC0_PDIV_MASK		1
+#define	PMU0_PLL0_PC0_PDIV_FREQ		25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK	0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT	3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE	8
+
+
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ	0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ	1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ	2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ	3 
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ	4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ	5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ	6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ	7
+
+
+#define	PMU0_PLL0_PLLCTL1		1
+#define	PMU0_PLL0_PC1_WILD_INT_MASK	0xf0000000
+#define	PMU0_PLL0_PC1_WILD_INT_SHIFT	28
+#define	PMU0_PLL0_PC1_WILD_FRAC_MASK	0x0fffff00
+#define	PMU0_PLL0_PC1_WILD_FRAC_SHIFT	8
+#define	PMU0_PLL0_PC1_STOP_MOD		0x00000040
+
+
+#define	PMU0_PLL0_PLLCTL2		2
+#define	PMU0_PLL0_PC2_WILD_INT_MASK	0xf
+#define	PMU0_PLL0_PC2_WILD_INT_SHIFT	4
+
+
+
+#define PMU1_PLL0_PLLCTL0		0
+#define PMU1_PLL0_PC0_P1DIV_MASK	0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT	20
+#define PMU1_PLL0_PC0_P2DIV_MASK	0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT	24
+
+
+#define PMU1_PLL0_PLLCTL1		1
+#define PMU1_PLL0_PC1_M1DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT	0
+#define PMU1_PLL0_PC1_M2DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT	8
+#define PMU1_PLL0_PC1_M3DIV_MASK	0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT	16
+#define PMU1_PLL0_PC1_M4DIV_MASK	0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT	24
+#define PMU1_PLL0_PC1_M4DIV_BY_9	9
+#define PMU1_PLL0_PC1_M4DIV_BY_18	0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36	0x24
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL  (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+
+#define PMU1_PLL0_PLLCTL2		2
+#define PMU1_PLL0_PC2_M5DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT	0
+#define PMU1_PLL0_PC2_M5DIV_BY_12	0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_M6DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT	8
+#define PMU1_PLL0_PC2_M6DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT	17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH	1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB	2	
+#define PMU1_PLL0_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT	20
+
+
+#define PMU1_PLL0_PLLCTL3		3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT	0
+
+
+#define PMU1_PLL0_PLLCTL4		4
+
+
+#define PMU1_PLL0_PLLCTL5		5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+
+
+#define PMU2_PHY_PLL_PLLCTL		4
+#define PMU2_SI_PLL_PLLCTL		10
+
+
+
+
+#define PMU2_PLL_PLLCTL0		0
+#define PMU2_PLL_PC0_P1DIV_MASK 	0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT	20
+#define PMU2_PLL_PC0_P2DIV_MASK 	0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT	24
+
+
+#define PMU2_PLL_PLLCTL1		1
+#define PMU2_PLL_PC1_M1DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT	0
+#define PMU2_PLL_PC1_M2DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT	8
+#define PMU2_PLL_PC1_M3DIV_MASK 	0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT	16
+#define PMU2_PLL_PC1_M4DIV_MASK 	0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT	24
+
+
+#define PMU2_PLL_PLLCTL2		2
+#define PMU2_PLL_PC2_M5DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT	0
+#define PMU2_PLL_PC2_M6DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT	8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT	17
+#define PMU2_PLL_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT	20
+
+
+#define PMU2_PLL_PLLCTL3		3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT	0
+
+
+#define PMU2_PLL_PLLCTL4		4
+
+
+#define PMU2_PLL_PLLCTL5		5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK	0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT	8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK	0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT	12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK	0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT	16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK	0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT	20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK	0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT	24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK	0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT	28
+
+
+#define	PMU5_PLL_P1P2_OFF		0
+#define	PMU5_PLL_P1_MASK		0x0f000000
+#define	PMU5_PLL_P1_SHIFT		24
+#define	PMU5_PLL_P2_MASK		0x00f00000
+#define	PMU5_PLL_P2_SHIFT		20
+#define	PMU5_PLL_M14_OFF		1
+#define	PMU5_PLL_MDIV_MASK		0x000000ff
+#define	PMU5_PLL_MDIV_WIDTH		8
+#define	PMU5_PLL_NM5_OFF		2
+#define	PMU5_PLL_NDIV_MASK		0xfff00000
+#define	PMU5_PLL_NDIV_SHIFT		20
+#define	PMU5_PLL_NDIV_MODE_MASK		0x000e0000
+#define	PMU5_PLL_NDIV_MODE_SHIFT	17
+#define	PMU5_PLL_FMAB_OFF		3
+#define	PMU5_PLL_MRAT_MASK		0xf0000000
+#define	PMU5_PLL_MRAT_SHIFT		28
+#define	PMU5_PLL_ABRAT_MASK		0x08000000
+#define	PMU5_PLL_ABRAT_SHIFT		27
+#define	PMU5_PLL_FDIV_MASK		0x07ffffff
+#define	PMU5_PLL_PLLCTL_OFF		4
+#define	PMU5_PLL_PCHI_OFF		5
+#define	PMU5_PLL_PCHI_MASK		0x0000003f
+
+
+#define	PMU_XTALFREQ_REG_ILPCTR_MASK	0x00001FFF
+#define	PMU_XTALFREQ_REG_MEASURE_MASK	0x80000000
+#define	PMU_XTALFREQ_REG_MEASURE_SHIFT	31
+
+
+#define	PMU5_MAINPLL_CPU		1
+#define	PMU5_MAINPLL_MEM		2
+#define	PMU5_MAINPLL_SI			3
+
+#define PMU7_PLL_PLLCTL7                7
+#define PMU7_PLL_CTL7_M4DIV_MASK	0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 	24
+#define PMU7_PLL_CTL7_M4DIV_BY_6	6
+#define PMU7_PLL_CTL7_M4DIV_BY_12	0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL8                8
+#define PMU7_PLL_CTL8_M5DIV_MASK	0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT	0
+#define PMU7_PLL_CTL8_M5DIV_BY_8	8
+#define PMU7_PLL_CTL8_M5DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24	0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK	0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT	8
+#define PMU7_PLL_CTL8_M6DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL11		11
+#define PMU7_PLL_PLLCTL11_MASK		0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL		0x22222200
+
+
+#define	PMU4716_MAINPLL_PLL0		12
+
+
+#define	PMU5356_MAINPLL_PLL0		0
+#define	PMU5357_MAINPLL_PLL0		0
+
+
+#define RES4716_PROC_PLL_ON		0x00000040
+#define RES4716_PROC_HT_AVAIL		0x00000080
+
+
+#define CCTRL_471X_I2S_PINS_ENABLE	0x0080 
+
+
+
+#define CCTRL_5357_I2S_PINS_ENABLE	0x00040000 
+#define CCTRL_5357_I2CSPI_PINS_ENABLE	0x00080000 
+
+
+#define RES5354_EXT_SWITCHER_PWM	0	
+#define RES5354_BB_SWITCHER_PWM		1	
+#define RES5354_BB_SWITCHER_BURST	2	
+#define RES5354_BB_EXT_SWITCHER_BURST	3	
+#define RES5354_ILP_REQUEST		4	
+#define RES5354_RADIO_SWITCHER_PWM	5	
+#define RES5354_RADIO_SWITCHER_BURST	6	
+#define RES5354_ROM_SWITCH		7	
+#define RES5354_PA_REF_LDO		8	
+#define RES5354_RADIO_LDO		9	
+#define RES5354_AFE_LDO			10	
+#define RES5354_PLL_LDO			11	
+#define RES5354_BG_FILTBYP		12	
+#define RES5354_TX_FILTBYP		13	
+#define RES5354_RX_FILTBYP		14	
+#define RES5354_XTAL_PU			15	
+#define RES5354_XTAL_EN			16	
+#define RES5354_BB_PLL_FILTBYP		17	
+#define RES5354_RF_PLL_FILTBYP		18	
+#define RES5354_BB_PLL_PU		19	
+
+
+#define CCTRL5357_EXTPA                 (1<<14)  
+#define CCTRL5357_ANT_MUX_2o3		(1<<15)  
+
+
+#define RES4328_EXT_SWITCHER_PWM	0	
+#define RES4328_BB_SWITCHER_PWM		1	
+#define RES4328_BB_SWITCHER_BURST	2	
+#define RES4328_BB_EXT_SWITCHER_BURST	3	
+#define RES4328_ILP_REQUEST		4	
+#define RES4328_RADIO_SWITCHER_PWM	5	
+#define RES4328_RADIO_SWITCHER_BURST	6	
+#define RES4328_ROM_SWITCH		7	
+#define RES4328_PA_REF_LDO		8	
+#define RES4328_RADIO_LDO		9	
+#define RES4328_AFE_LDO			10	
+#define RES4328_PLL_LDO			11	
+#define RES4328_BG_FILTBYP		12	
+#define RES4328_TX_FILTBYP		13	
+#define RES4328_RX_FILTBYP		14	
+#define RES4328_XTAL_PU			15	
+#define RES4328_XTAL_EN			16	
+#define RES4328_BB_PLL_FILTBYP		17	
+#define RES4328_RF_PLL_FILTBYP		18	
+#define RES4328_BB_PLL_PU		19	
+
+
+#define RES4325_BUCK_BOOST_BURST	0	
+#define RES4325_CBUCK_BURST		1	
+#define RES4325_CBUCK_PWM		2	
+#define RES4325_CLDO_CBUCK_BURST	3	
+#define RES4325_CLDO_CBUCK_PWM		4	
+#define RES4325_BUCK_BOOST_PWM		5	
+#define RES4325_ILP_REQUEST		6	
+#define RES4325_ABUCK_BURST		7	
+#define RES4325_ABUCK_PWM		8	
+#define RES4325_LNLDO1_PU		9	
+#define RES4325_OTP_PU			10	
+#define RES4325_LNLDO3_PU		11	
+#define RES4325_LNLDO4_PU		12	
+#define RES4325_XTAL_PU			13	
+#define RES4325_ALP_AVAIL		14	
+#define RES4325_RX_PWRSW_PU		15	
+#define RES4325_TX_PWRSW_PU		16	
+#define RES4325_RFPLL_PWRSW_PU		17	
+#define RES4325_LOGEN_PWRSW_PU		18	
+#define RES4325_AFE_PWRSW_PU		19	
+#define RES4325_BBPLL_PWRSW_PU		20	
+#define RES4325_HT_AVAIL		21	
+
+
+#define RES4325B0_CBUCK_LPOM		1	
+#define RES4325B0_CBUCK_BURST		2	
+#define RES4325B0_CBUCK_PWM		3	
+#define RES4325B0_CLDO_PU		4	
+
+
+#define RES4325C1_LNLDO2_PU		12	
+
+
+#define CST4325_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4325_DEFCIS_SEL		0	
+#define CST4325_SPROM_SEL		1	
+#define CST4325_OTP_SEL			2	
+#define CST4325_OTP_PWRDN		3	
+#define CST4325_SDIO_USB_MODE_MASK	0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT	2
+#define CST4325_RCAL_VALID_MASK		0x00000008
+#define CST4325_RCAL_VALID_SHIFT	3
+#define CST4325_RCAL_VALUE_MASK		0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT	4
+#define CST4325_PMUTOP_2B_MASK 		0x00000200	
+#define CST4325_PMUTOP_2B_SHIFT   	9
+
+#define RES4329_RESERVED0		0	
+#define RES4329_CBUCK_LPOM		1	
+#define RES4329_CBUCK_BURST		2	
+#define RES4329_CBUCK_PWM		3	
+#define RES4329_CLDO_PU			4	
+#define RES4329_PALDO_PU		5	
+#define RES4329_ILP_REQUEST		6	
+#define RES4329_RESERVED7		7	
+#define RES4329_RESERVED8		8	
+#define RES4329_LNLDO1_PU		9	
+#define RES4329_OTP_PU			10	
+#define RES4329_RESERVED11		11	
+#define RES4329_LNLDO2_PU		12	
+#define RES4329_XTAL_PU			13	
+#define RES4329_ALP_AVAIL		14	
+#define RES4329_RX_PWRSW_PU		15	
+#define RES4329_TX_PWRSW_PU		16	
+#define RES4329_RFPLL_PWRSW_PU		17	
+#define RES4329_LOGEN_PWRSW_PU		18	
+#define RES4329_AFE_PWRSW_PU		19	
+#define RES4329_BBPLL_PWRSW_PU		20	
+#define RES4329_HT_AVAIL		21	
+
+#define CST4329_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4329_DEFCIS_SEL		0	
+#define CST4329_SPROM_SEL		1	
+#define CST4329_OTP_SEL			2	
+#define CST4329_OTP_PWRDN		3	
+#define CST4329_SPI_SDIO_MODE_MASK	0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT	2
+
+
+#define CST4312_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4312_DEFCIS_SEL		0	
+#define CST4312_SPROM_SEL		1	
+#define CST4312_OTP_SEL			2	
+#define CST4312_OTP_BAD			3	
+
+
+#define RES4312_SWITCHER_BURST		0	
+#define RES4312_SWITCHER_PWM    	1	
+#define RES4312_PA_REF_LDO		2	
+#define RES4312_CORE_LDO_BURST		3	
+#define RES4312_CORE_LDO_PWM		4	
+#define RES4312_RADIO_LDO		5	
+#define RES4312_ILP_REQUEST		6	
+#define RES4312_BG_FILTBYP		7	
+#define RES4312_TX_FILTBYP		8	
+#define RES4312_RX_FILTBYP		9	
+#define RES4312_XTAL_PU			10	
+#define RES4312_ALP_AVAIL		11	
+#define RES4312_BB_PLL_FILTBYP		12	
+#define RES4312_RF_PLL_FILTBYP		13	
+#define RES4312_HT_AVAIL		14	
+
+
+#define RES4322_RF_LDO			0
+#define RES4322_ILP_REQUEST		1
+#define RES4322_XTAL_PU			2
+#define RES4322_ALP_AVAIL		3
+#define RES4322_SI_PLL_ON		4
+#define RES4322_HT_SI_AVAIL		5
+#define RES4322_PHY_PLL_ON		6
+#define RES4322_HT_PHY_AVAIL		7
+#define RES4322_OTP_PU			8
+
+
+#define CST4322_XTAL_FREQ_20_40MHZ	0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK	0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT	6
+#define CST4322_NO_SPROM_OTP		0	
+#define CST4322_SPROM_PRESENT		1	
+#define CST4322_OTP_PRESENT		2	
+#define CST4322_PCI_OR_USB		0x00000100
+#define CST4322_BOOT_MASK		0x00000600
+#define CST4322_BOOT_SHIFT		9
+#define CST4322_BOOT_FROM_SRAM		0	
+#define CST4322_BOOT_FROM_ROM		1	
+#define CST4322_BOOT_FROM_FLASH		2	
+#define CST4322_BOOT_FROM_INVALID	3
+#define CST4322_ILP_DIV_EN		0x00000800
+#define CST4322_FLASH_TYPE_MASK		0x00001000
+#define CST4322_FLASH_TYPE_SHIFT	12
+#define CST4322_FLASH_TYPE_SHIFT_ST	0	
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL	1	
+#define CST4322_ARM_TAP_SEL		0x00002000
+#define CST4322_RES_INIT_MODE_MASK	0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT	14
+#define CST4322_RES_INIT_MODE_ILPAVAIL	0	
+#define CST4322_RES_INIT_MODE_ILPREQ	1	
+#define CST4322_RES_INIT_MODE_ALPAVAIL	2	
+#define CST4322_RES_INIT_MODE_HTAVAIL	3	
+#define CST4322_PCIPLLCLK_GATING	0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP	0x00020000
+#define CST4322_PCI_CARDBUS_MODE	0x00040000
+
+
+#define CCTRL43224_GPIO_TOGGLE          0x8000 
+#define CCTRL_43224A0_12MA_LED_DRIVE    0x00F000F0 
+#define CCTRL_43224B0_12MA_LED_DRIVE    0xF0    
+
+
+#define RES43236_REGULATOR		0
+#define RES43236_ILP_REQUEST		1
+#define RES43236_XTAL_PU		2
+#define RES43236_ALP_AVAIL		3
+#define RES43236_SI_PLL_ON		4
+#define RES43236_HT_SI_AVAIL		5
+
+
+#define CCTRL43236_BT_COEXIST		(1<<0)	
+#define CCTRL43236_SECI			(1<<1)	
+#define CCTRL43236_EXT_LNA		(1<<2)	
+#define CCTRL43236_ANT_MUX_2o3          (1<<3)	
+#define CCTRL43236_GSIO			(1<<4)	
+
+
+#define CST43236_SFLASH_MASK		0x00000040
+#define CST43236_OTP_SEL_MASK		0x00000080
+#define CST43236_OTP_SEL_SHIFT		7
+#define CST43236_HSIC_MASK		0x00000100	
+#define CST43236_BP_CLK			0x00000200	
+#define CST43236_BOOT_MASK		0x00001800
+#define CST43236_BOOT_SHIFT		11
+#define CST43236_BOOT_FROM_SRAM		0	
+#define CST43236_BOOT_FROM_ROM		1	
+#define CST43236_BOOT_FROM_FLASH	2	
+#define CST43236_BOOT_FROM_INVALID	3
+
+
+#define RES43237_REGULATOR		0
+#define RES43237_ILP_REQUEST		1
+#define RES43237_XTAL_PU		2
+#define RES43237_ALP_AVAIL		3
+#define RES43237_SI_PLL_ON		4
+#define RES43237_HT_SI_AVAIL		5
+
+
+#define CCTRL43237_BT_COEXIST		(1<<0)	
+#define CCTRL43237_SECI			(1<<1)	
+#define CCTRL43237_EXT_LNA		(1<<2)	
+#define CCTRL43237_ANT_MUX_2o3          (1<<3)	
+#define CCTRL43237_GSIO			(1<<4)	
+
+
+#define CST43237_SFLASH_MASK		0x00000040
+#define CST43237_OTP_SEL_MASK		0x00000080
+#define CST43237_OTP_SEL_SHIFT		7
+#define CST43237_HSIC_MASK		0x00000100	
+#define CST43237_BP_CLK			0x00000200	
+#define CST43237_BOOT_MASK		0x00001800
+#define CST43237_BOOT_SHIFT		11
+#define CST43237_BOOT_FROM_SRAM		0	
+#define CST43237_BOOT_FROM_ROM		1	
+#define CST43237_BOOT_FROM_FLASH	2	
+#define CST43237_BOOT_FROM_INVALID	3
+
+
+#define RES43239_OTP_PU			9
+#define RES43239_MACPHY_CLKAVAIL	23
+#define RES43239_HT_AVAIL		24
+
+
+#define CST43239_SPROM_MASK			0x00000002
+#define CST43239_SFLASH_MASK		0x00000004
+#define	CST43239_RES_INIT_MODE_SHIFT	7
+#define	CST43239_RES_INIT_MODE_MASK		0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs)	((cs) & (1 << 15))	
+#define CST43239_CHIPMODE_USB20D(cs)	(((cs) & (1 << 15)) == 0)	/* bit 15 clear; "& !(1 << 15)" was always 0 */
+#define CST43239_CHIPMODE_SDIO(cs)	(((cs) & (1 << 0)) == 0)	
+#define CST43239_CHIPMODE_GSPI(cs)	(((cs) & (1 << 0)) == (1 << 0))	
+
+
+#define CCTRL43239_XTAL_STRENGTH(ctl)	((ctl & 0x3F) << 12)
+
+
+
+
+#define RES4315_CBUCK_LPOM		1	
+#define RES4315_CBUCK_BURST		2	
+#define RES4315_CBUCK_PWM		3	
+#define RES4315_CLDO_PU			4	
+#define RES4315_PALDO_PU		5	
+#define RES4315_ILP_REQUEST		6	
+#define RES4315_LNLDO1_PU		9	
+#define RES4315_OTP_PU			10	
+#define RES4315_LNLDO2_PU		12	
+#define RES4315_XTAL_PU			13	
+#define RES4315_ALP_AVAIL		14	
+#define RES4315_RX_PWRSW_PU		15	
+#define RES4315_TX_PWRSW_PU		16	
+#define RES4315_RFPLL_PWRSW_PU		17	
+#define RES4315_LOGEN_PWRSW_PU		18	
+#define RES4315_AFE_PWRSW_PU		19	
+#define RES4315_BBPLL_PWRSW_PU		20	
+#define RES4315_HT_AVAIL		21	
+
+
+#define CST4315_SPROM_OTP_SEL_MASK	0x00000003	
+#define CST4315_DEFCIS_SEL		0x00000000	
+#define CST4315_SPROM_SEL		0x00000001	
+#define CST4315_OTP_SEL			0x00000002	
+#define CST4315_OTP_PWRDN		0x00000003	
+#define CST4315_SDIO_MODE		0x00000004	
+#define CST4315_RCAL_VALID		0x00000008
+#define CST4315_RCAL_VALUE_MASK		0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT	4
+#define CST4315_PALDO_EXTPNP		0x00000200	
+#define CST4315_CBUCK_MODE_MASK		0x00000c00
+#define CST4315_CBUCK_MODE_BURST	0x00000400
+#define CST4315_CBUCK_MODE_LPBURST	0x00000c00
+
+
+#define RES4319_CBUCK_LPOM		1	
+#define RES4319_CBUCK_BURST		2	
+#define RES4319_CBUCK_PWM		3	
+#define RES4319_CLDO_PU			4	
+#define RES4319_PALDO_PU		5	
+#define RES4319_ILP_REQUEST		6	
+#define RES4319_LNLDO1_PU		9	
+#define RES4319_OTP_PU			10	
+#define RES4319_LNLDO2_PU		12	
+#define RES4319_XTAL_PU			13	
+#define RES4319_ALP_AVAIL		14	
+#define RES4319_RX_PWRSW_PU		15	
+#define RES4319_TX_PWRSW_PU		16	
+#define RES4319_RFPLL_PWRSW_PU		17	
+#define RES4319_LOGEN_PWRSW_PU		18	
+#define RES4319_AFE_PWRSW_PU		19	
+#define RES4319_BBPLL_PWRSW_PU		20	
+#define RES4319_HT_AVAIL		21	
+
+
+#define	CST4319_SPI_CPULESSUSB		0x00000001
+#define	CST4319_SPI_CLK_POL		0x00000002
+#define	CST4319_SPI_CLK_PH		0x00000008
+#define	CST4319_SPROM_OTP_SEL_MASK	0x000000c0	
+#define	CST4319_SPROM_OTP_SEL_SHIFT	6
+#define	CST4319_DEFCIS_SEL		0x00000000	
+#define	CST4319_SPROM_SEL		0x00000040	
+#define	CST4319_OTP_SEL			0x00000080      
+#define	CST4319_OTP_PWRDN		0x000000c0      
+#define	CST4319_SDIO_USB_MODE		0x00000100	
+#define	CST4319_REMAP_SEL_MASK		0x00000600
+#define	CST4319_ILPDIV_EN		0x00000800
+#define	CST4319_XTAL_PD_POL		0x00001000
+#define	CST4319_LPO_SEL			0x00002000
+#define	CST4319_RES_INIT_MODE		0x0000c000
+#define	CST4319_PALDO_EXTPNP		0x00010000	
+#define	CST4319_CBUCK_MODE_MASK		0x00060000
+#define CST4319_CBUCK_MODE_BURST	0x00020000
+#define CST4319_CBUCK_MODE_LPBURST	0x00060000
+#define	CST4319_RCAL_VALID		0x01000000
+#define	CST4319_RCAL_VALUE_MASK		0x3e000000
+#define	CST4319_RCAL_VALUE_SHIFT	25
+
+#define PMU1_PLL0_CHIPCTL0		0
+#define PMU1_PLL0_CHIPCTL1		1
+#define PMU1_PLL0_CHIPCTL2		2
+#define CCTL_4319USB_XTAL_SEL_MASK	0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT	19
+#define CCTL_4319USB_48MHZ_PLL_SEL	1
+#define CCTL_4319USB_24MHZ_PLL_SEL	2
+
+
+#define	RES4336_CBUCK_LPOM		0
+#define	RES4336_CBUCK_BURST		1
+#define	RES4336_CBUCK_LP_PWM		2
+#define	RES4336_CBUCK_PWM		3
+#define	RES4336_CLDO_PU			4
+#define	RES4336_DIS_INT_RESET_PD	5
+#define	RES4336_ILP_REQUEST		6
+#define	RES4336_LNLDO_PU		7
+#define	RES4336_LDO3P3_PU		8
+#define	RES4336_OTP_PU			9
+#define	RES4336_XTAL_PU			10
+#define	RES4336_ALP_AVAIL		11
+#define	RES4336_RADIO_PU		12
+#define	RES4336_BG_PU			13
+#define	RES4336_VREG1p4_PU_PU		14
+#define	RES4336_AFE_PWRSW_PU		15
+#define	RES4336_RX_PWRSW_PU		16
+#define	RES4336_TX_PWRSW_PU		17
+#define	RES4336_BB_PWRSW_PU		18
+#define	RES4336_SYNTH_PWRSW_PU		19
+#define	RES4336_MISC_PWRSW_PU		20
+#define	RES4336_LOGEN_PWRSW_PU		21
+#define	RES4336_BBPLL_PWRSW_PU		22
+#define	RES4336_MACPHY_CLKAVAIL		23
+#define	RES4336_HT_AVAIL		24
+#define	RES4336_RSVD			25
+
+
+#define	CST4336_SPI_MODE_MASK		0x00000001
+#define	CST4336_SPROM_PRESENT		0x00000002
+#define	CST4336_OTP_PRESENT		0x00000004
+#define	CST4336_ARMREMAP_0		0x00000008
+#define	CST4336_ILPDIV_EN_MASK		0x00000010
+#define	CST4336_ILPDIV_EN_SHIFT		4
+#define	CST4336_XTAL_PD_POL_MASK	0x00000020
+#define	CST4336_XTAL_PD_POL_SHIFT	5
+#define	CST4336_LPO_SEL_MASK		0x00000040
+#define	CST4336_LPO_SEL_SHIFT		6
+#define	CST4336_RES_INIT_MODE_MASK	0x00000180
+#define	CST4336_RES_INIT_MODE_SHIFT	7
+#define	CST4336_CBUCK_MODE_MASK		0x00000600
+#define	CST4336_CBUCK_MODE_SHIFT	9
+
+
+#define PCTL_4336_SERIAL_ENAB	(1  << 24)
+
+
+#define	RES4330_CBUCK_LPOM		0
+#define	RES4330_CBUCK_BURST		1
+#define	RES4330_CBUCK_LP_PWM		2
+#define	RES4330_CBUCK_PWM		3
+#define	RES4330_CLDO_PU			4
+#define	RES4330_DIS_INT_RESET_PD	5
+#define	RES4330_ILP_REQUEST		6
+#define	RES4330_LNLDO_PU		7
+#define	RES4330_LDO3P3_PU		8
+#define	RES4330_OTP_PU			9
+#define	RES4330_XTAL_PU			10
+#define	RES4330_ALP_AVAIL		11
+#define	RES4330_RADIO_PU		12
+#define	RES4330_BG_PU			13
+#define	RES4330_VREG1p4_PU_PU		14
+#define	RES4330_AFE_PWRSW_PU		15
+#define	RES4330_RX_PWRSW_PU		16
+#define	RES4330_TX_PWRSW_PU		17
+#define	RES4330_BB_PWRSW_PU		18
+#define	RES4330_SYNTH_PWRSW_PU		19
+#define	RES4330_MISC_PWRSW_PU		20
+#define	RES4330_LOGEN_PWRSW_PU		21
+#define	RES4330_BBPLL_PWRSW_PU		22
+#define	RES4330_MACPHY_CLKAVAIL		23
+#define	RES4330_HT_AVAIL		24
+#define	RES4330_5gRX_PWRSW_PU		25
+#define	RES4330_5gTX_PWRSW_PU		26
+#define	RES4330_5g_LOGEN_PWRSW_PU	27
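+
+/*
+ * Illustrative sketch, not part of the original header: individual PMU
+ * resources are identified by bit position, so PMURES_BIT() is used to build
+ * resource bitmaps.  The macro name and the particular choice of resources
+ * below are assumptions made only for this example.
+ */
+#define EXAMPLE_4330_RES_MASK \
+	(PMURES_BIT(RES4330_CBUCK_LPOM) | \
+	 PMURES_BIT(RES4330_CLDO_PU) | \
+	 PMURES_BIT(RES4330_ILP_REQUEST))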
+
+
+#define CST4330_CHIPMODE_SDIOD(cs)	(((cs) & 0x7) < 6)	
+#define CST4330_CHIPMODE_USB20D(cs)	(((cs) & 0x7) >= 6)	
+#define CST4330_CHIPMODE_SDIO(cs)	(((cs) & 0x4) == 0)	
+#define CST4330_CHIPMODE_GSPI(cs)	(((cs) & 0x6) == 4)	
+#define CST4330_CHIPMODE_USB(cs)	(((cs) & 0x7) == 6)	
+#define CST4330_CHIPMODE_USBDA(cs)	(((cs) & 0x7) == 7)	
+#define	CST4330_OTP_PRESENT		0x00000010
+#define	CST4330_LPO_AUTODET_EN		0x00000020
+#define	CST4330_ARMREMAP_0		0x00000040
+#define	CST4330_SPROM_PRESENT		0x00000080	
+#define	CST4330_ILPDIV_EN		0x00000100
+#define	CST4330_LPO_SEL			0x00000200
+#define	CST4330_RES_INIT_MODE_SHIFT	10
+#define	CST4330_RES_INIT_MODE_MASK	0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT	12
+#define CST4330_CBUCK_MODE_MASK		0x00003000
+#define	CST4330_CBUCK_POWER_OK		0x00004000
+#define	CST4330_BB_PLL_LOCKED		0x00008000
+#define SOCDEVRAM_4330_BP_ADDR		0x1E000000
+#define SOCDEVRAM_4330_ARM_ADDR		0x00800000
+
+
+#define PCTL_4330_SERIAL_ENAB	(1  << 24)
+
+
+#define CCTRL_4330_GPIO_SEL		0x00000001    
+#define CCTRL_4330_ERCX_SEL		0x00000002    
+#define CCTRL_4330_SDIO_HOST_WAKE	0x00000004    
+#define CCTRL_4330_JTAG_DISABLE	0x00000008    
+
+
+#define	RES4313_BB_PU_RSRC		0
+#define	RES4313_ILP_REQ_RSRC		1
+#define	RES4313_XTAL_PU_RSRC		2
+#define	RES4313_ALP_AVAIL_RSRC		3
+#define	RES4313_RADIO_PU_RSRC		4
+#define	RES4313_BG_PU_RSRC		5
+#define	RES4313_VREG1P4_PU_RSRC		6
+#define	RES4313_AFE_PWRSW_RSRC		7
+#define	RES4313_RX_PWRSW_RSRC		8
+#define	RES4313_TX_PWRSW_RSRC		9
+#define	RES4313_BB_PWRSW_RSRC		10
+#define	RES4313_SYNTH_PWRSW_RSRC	11
+#define	RES4313_MISC_PWRSW_RSRC		12
+#define	RES4313_BB_PLL_PWRSW_RSRC	13
+#define	RES4313_HT_AVAIL_RSRC		14
+#define	RES4313_MACPHY_CLK_AVAIL_RSRC	15
+
+
+#define	CST4313_SPROM_PRESENT			1
+#define	CST4313_OTP_PRESENT			2
+#define	CST4313_SPROM_OTP_SEL_MASK		0x00000002
+#define	CST4313_SPROM_OTP_SEL_SHIFT		0
+
+
+#define CCTRL_4313_12MA_LED_DRIVE    0x00000007    
+
+
+#define RES43228_NOT_USED		0
+#define RES43228_ILP_REQUEST		1
+#define RES43228_XTAL_PU		2
+#define RES43228_ALP_AVAIL		3
+#define RES43228_PLL_EN			4
+#define RES43228_HT_PHY_AVAIL		5
+
+
+#define CST43228_ILP_DIV_EN		0x1
+#define	CST43228_OTP_PRESENT		0x2
+#define	CST43228_SERDES_REFCLK_PADSEL	0x4
+#define	CST43228_SDIO_MODE		0x8
+#define	CST43228_SDIO_OTP_PRESENT	0x10
+#define	CST43228_SDIO_RESET		0x20
+
+
+#define PMU_MAX_TRANSITION_DLY	15000
+
+
+#define PMURES_UP_TRANSITION	2
+
+
+
+
+
+#define ECI_BW_20   0x0
+#define ECI_BW_25   0x1
+#define ECI_BW_30   0x2
+#define ECI_BW_35   0x3
+#define ECI_BW_40   0x4
+#define ECI_BW_45   0x5
+#define ECI_BW_50   0x6
+#define ECI_BW_ALL  0x7
+
+
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 0000000..76f05ae
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,276 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h,v 13.70 2008-03-28 19:17:04 Exp $
+ */
+
+
+#ifndef	_SBCONFIG_H
+#define	_SBCONFIG_H
+
+
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+
+#define SB_BUS_SIZE		0x10000		
+#define SB_BUS_BASE(b)		(SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define	SB_BUS_MAXCORES		(SB_BUS_SIZE / SI_CORE_SIZE)	
+
+
+#define	SBCONFIGOFF		0xf00		
+#define	SBCONFIGSIZE		256		
+
+#define SBIPSFLAG		0x08
+#define SBTPSFLAG		0x18
+#define	SBTMERRLOGA		0x48		
+#define	SBTMERRLOG		0x50		
+#define SBADMATCH3		0x60
+#define SBADMATCH2		0x68
+#define SBADMATCH1		0x70
+#define SBIMSTATE		0x90
+#define SBINTVEC		0x94
+#define SBTMSTATELOW		0x98
+#define SBTMSTATEHIGH		0x9c
+#define SBBWA0			0xa0
+#define SBIMCONFIGLOW		0xa8
+#define SBIMCONFIGHIGH		0xac
+#define SBADMATCH0		0xb0
+#define SBTMCONFIGLOW		0xb8
+#define SBTMCONFIGHIGH		0xbc
+#define SBBCONFIG		0xc0
+#define SBBSTATE		0xc8
+#define SBACTCNFG		0xd8
+#define	SBFLAGST		0xe8
+#define SBIDLOW			0xf8
+#define SBIDHIGH		0xfc
+
+
+
+#define SBIMERRLOGA		0xea8
+#define SBIMERRLOG		0xeb0
+#define SBTMPORTCONNID0		0xed8
+#define SBTMPORTLOCK0		0xef8
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _sbconfig {
+	uint32	PAD[2];
+	uint32	sbipsflag;		
+	uint32	PAD[3];
+	uint32	sbtpsflag;		
+	uint32	PAD[11];
+	uint32	sbtmerrloga;		
+	uint32	PAD;
+	uint32	sbtmerrlog;		
+	uint32	PAD[3];
+	uint32	sbadmatch3;		
+	uint32	PAD;
+	uint32	sbadmatch2;		
+	uint32	PAD;
+	uint32	sbadmatch1;		
+	uint32	PAD[7];
+	uint32	sbimstate;		
+	uint32	sbintvec;		
+	uint32	sbtmstatelow;		
+	uint32	sbtmstatehigh;		
+	uint32	sbbwa0;			
+	uint32	PAD;
+	uint32	sbimconfiglow;		
+	uint32	sbimconfighigh;		
+	uint32	sbadmatch0;		
+	uint32	PAD;
+	uint32	sbtmconfiglow;		
+	uint32	sbtmconfighigh;		
+	uint32	sbbconfig;		
+	uint32	PAD;
+	uint32	sbbstate;		
+	uint32	PAD[3];
+	uint32	sbactcnfg;		
+	uint32	PAD[3];
+	uint32	sbflagst;		
+	uint32	PAD[3];
+	uint32	sbidlow;		
+	uint32	sbidhigh;		
+} sbconfig_t;
+
+#endif 
+
+
+#define	SBIPS_INT1_MASK		0x3f		
+#define	SBIPS_INT1_SHIFT	0
+#define	SBIPS_INT2_MASK		0x3f00		
+#define	SBIPS_INT2_SHIFT	8
+#define	SBIPS_INT3_MASK		0x3f0000	
+#define	SBIPS_INT3_SHIFT	16
+#define	SBIPS_INT4_MASK		0x3f000000	
+#define	SBIPS_INT4_SHIFT	24
+
+
+#define	SBTPS_NUM0_MASK		0x3f		
+#define	SBTPS_F0EN0		0x40		
+
+
+#define	SBTMEL_CM		0x00000007	
+#define	SBTMEL_CI		0x0000ff00	
+#define	SBTMEL_EC		0x0f000000	
+#define	SBTMEL_ME		0x80000000	
+
+
+#define	SBIM_PC			0xf		
+#define	SBIM_AP_MASK		0x30		
+#define	SBIM_AP_BOTH		0x00		
+#define	SBIM_AP_TS		0x10		
+#define	SBIM_AP_TK		0x20		
+#define	SBIM_AP_RSV		0x30		
+#define	SBIM_IBE		0x20000		
+#define	SBIM_TO			0x40000		
+#define	SBIM_BY			0x01800000	
+#define	SBIM_RJ			0x02000000	
+
+
+#define	SBTML_RESET		0x0001		
+#define	SBTML_REJ_MASK		0x0006		
+#define	SBTML_REJ		0x0002		
+#define	SBTML_TMPREJ		0x0004		
+
+#define	SBTML_SICF_SHIFT	16		
+
+
+#define	SBTMH_SERR		0x0001		
+#define	SBTMH_INT		0x0002		
+#define	SBTMH_BUSY		0x0004		
+#define	SBTMH_TO		0x0020		
+
+#define	SBTMH_SISF_SHIFT	16		
+
+
+#define	SBBWA_TAB0_MASK		0xffff		
+#define	SBBWA_TAB1_MASK		0xffff		
+#define	SBBWA_TAB1_SHIFT	16
+
+
+#define	SBIMCL_STO_MASK		0x7		
+#define	SBIMCL_RTO_MASK		0x70		
+#define	SBIMCL_RTO_SHIFT	4
+#define	SBIMCL_CID_MASK		0xff0000	
+#define	SBIMCL_CID_SHIFT	16
+
+
+#define	SBIMCH_IEM_MASK		0xc		
+#define	SBIMCH_TEM_MASK		0x30		
+#define	SBIMCH_TEM_SHIFT	4
+#define	SBIMCH_BEM_MASK		0xc0		
+#define	SBIMCH_BEM_SHIFT	6
+
+
+#define	SBAM_TYPE_MASK		0x3		
+#define	SBAM_AD64		0x4		
+#define	SBAM_ADINT0_MASK	0xf8		
+#define	SBAM_ADINT0_SHIFT	3
+#define	SBAM_ADINT1_MASK	0x1f8		
+#define	SBAM_ADINT1_SHIFT	3
+#define	SBAM_ADINT2_MASK	0x1f8		
+#define	SBAM_ADINT2_SHIFT	3
+#define	SBAM_ADEN		0x400		
+#define	SBAM_ADNEG		0x800		
+#define	SBAM_BASE0_MASK		0xffffff00	
+#define	SBAM_BASE0_SHIFT	8
+#define	SBAM_BASE1_MASK		0xfffff000	
+#define	SBAM_BASE1_SHIFT	12
+#define	SBAM_BASE2_MASK		0xffff0000	
+#define	SBAM_BASE2_SHIFT	16
+
+
+#define	SBTMCL_CD_MASK		0xff		
+#define	SBTMCL_CO_MASK		0xf800		
+#define	SBTMCL_CO_SHIFT		11
+#define	SBTMCL_IF_MASK		0xfc0000	
+#define	SBTMCL_IF_SHIFT		18
+#define	SBTMCL_IM_MASK		0x3000000	
+#define	SBTMCL_IM_SHIFT		24
+
+
+#define	SBTMCH_BM_MASK		0x3		
+#define	SBTMCH_RM_MASK		0x3		
+#define	SBTMCH_RM_SHIFT		2
+#define	SBTMCH_SM_MASK		0x30		
+#define	SBTMCH_SM_SHIFT		4
+#define	SBTMCH_EM_MASK		0x300		
+#define	SBTMCH_EM_SHIFT		8
+#define	SBTMCH_IM_MASK		0xc00		
+#define	SBTMCH_IM_SHIFT		10
+
+
+#define	SBBC_LAT_MASK		0x3		
+#define	SBBC_MAX0_MASK		0xf0000		
+#define	SBBC_MAX0_SHIFT		16
+#define	SBBC_MAX1_MASK		0xf00000	
+#define	SBBC_MAX1_SHIFT		20
+
+
+#define	SBBS_SRD		0x1		
+#define	SBBS_HRD		0x2		
+
+
+#define	SBIDL_CS_MASK		0x3		
+#define	SBIDL_AR_MASK		0x38		
+#define	SBIDL_AR_SHIFT		3
+#define	SBIDL_SYNCH		0x40		
+#define	SBIDL_INIT		0x80		
+#define	SBIDL_MINLAT_MASK	0xf00		
+#define	SBIDL_MINLAT_SHIFT	8
+#define	SBIDL_MAXLAT		0xf000		
+#define	SBIDL_MAXLAT_SHIFT	12
+#define	SBIDL_FIRST		0x10000		
+#define	SBIDL_CW_MASK		0xc0000		
+#define	SBIDL_CW_SHIFT		18
+#define	SBIDL_TP_MASK		0xf00000	
+#define	SBIDL_TP_SHIFT		20
+#define	SBIDL_IP_MASK		0xf000000	
+#define	SBIDL_IP_SHIFT		24
+#define	SBIDL_RV_MASK		0xf0000000	
+#define	SBIDL_RV_SHIFT		28
+#define	SBIDL_RV_2_2		0x00000000	
+#define	SBIDL_RV_2_3		0x10000000	
+
+
+#define	SBIDH_RC_MASK		0x000f		
+#define	SBIDH_RCE_MASK		0x7000		
+#define	SBIDH_RCE_SHIFT		8
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define	SBIDH_CC_MASK		0x8ff0		
+#define	SBIDH_CC_SHIFT		4
+#define	SBIDH_VC_MASK		0xffff0000	
+#define	SBIDH_VC_SHIFT		16
+
+#define	SB_COMMIT		0xfd8		
+
+
+#define	SB_VEND_BCM		0x4243		
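+
+/*
+ * Illustrative sketch, not part of the original header: sbidhigh packs the
+ * core revision (see SBCOREREV() above), the core code and the vendor code
+ * (SB_VEND_BCM for Broadcom cores).  The helper and struct names are
+ * assumptions made only for this example.
+ */
+struct sb_idhigh_example {
+	unsigned int corerev;
+	unsigned int corecode;
+	unsigned int vendor;
+};
+
+static inline void
+sb_decode_idhigh_example(unsigned int sbidhigh, struct sb_idhigh_example *id)
+{
+	id->corerev  = SBCOREREV(sbidhigh);
+	id->corecode = (sbidhigh & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
+	id->vendor   = (sbidhigh & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT;
+}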
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
new file mode 100644
index 0000000..05d0587
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -0,0 +1,327 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h,v 13.20.2.3 2010-10-14 22:21:29 Exp $
+ */
+
+
+#ifndef	_sbhnddma_h_
+#define	_sbhnddma_h_
+
+
+
+
+
+
+
+typedef volatile struct {
+	uint32	control;		
+	uint32	addr;			
+	uint32	ptr;			
+	uint32	status;			
+} dma32regs_t;
+
+typedef volatile struct {
+	dma32regs_t	xmt;		
+	dma32regs_t	rcv;		
+} dma32regp_t;
+
+typedef volatile struct {	
+	uint32	fifoaddr;		
+	uint32	fifodatalow;		
+	uint32	fifodatahigh;		
+	uint32	pad;			
+} dma32diag_t;
+
+
+typedef volatile struct {
+	uint32	ctrl;		
+	uint32	addr;		
+} dma32dd_t;
+
+
+#define	D32RINGALIGN_BITS	12
+#define	D32MAXRINGSZ		(1 << D32RINGALIGN_BITS)
+#define	D32RINGALIGN		(1 << D32RINGALIGN_BITS)
+
+#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
+
+
+#define	XC_XE		((uint32)1 << 0)	
+#define	XC_SE		((uint32)1 << 1)	
+#define	XC_LE		((uint32)1 << 2)	
+#define	XC_FL		((uint32)1 << 4)	
+#define	XC_PD		((uint32)1 << 11)	
+#define	XC_AE		((uint32)3 << 16)	
+#define	XC_AE_SHIFT	16
+#define XC_BL_MASK	0x001C0000			
+#define XC_BL_SHIFT	18
+
+
+#define	XP_LD_MASK	0xfff			
+
+
+#define	XS_CD_MASK	0x0fff			
+#define	XS_XS_MASK	0xf000			
+#define	XS_XS_SHIFT	12
+#define	XS_XS_DISABLED	0x0000			
+#define	XS_XS_ACTIVE	0x1000			
+#define	XS_XS_IDLE	0x2000			
+#define	XS_XS_STOPPED	0x3000			
+#define	XS_XS_SUSP	0x4000			
+#define	XS_XE_MASK	0xf0000			
+#define	XS_XE_SHIFT	16
+#define	XS_XE_NOERR	0x00000			
+#define	XS_XE_DPE	0x10000			
+#define	XS_XE_DFU	0x20000			
+#define	XS_XE_BEBR	0x30000			
+#define	XS_XE_BEDA	0x40000			
+#define	XS_AD_MASK	0xfff00000		
+#define	XS_AD_SHIFT	20
+
+
+#define	RC_RE		((uint32)1 << 0)	
+#define	RC_RO_MASK	0xfe			
+#define	RC_RO_SHIFT	1
+#define	RC_FM		((uint32)1 << 8)	
+#define	RC_SH		((uint32)1 << 9)	
+#define	RC_OC		((uint32)1 << 10)	
+#define	RC_PD		((uint32)1 << 11)	
+#define	RC_AE		((uint32)3 << 16)	
+#define	RC_AE_SHIFT	16
+#define RC_BL_MASK	0x001C0000			
+#define RC_BL_SHIFT	18
+
+
+#define	RP_LD_MASK	0xfff			
+
+
+#define	RS_CD_MASK	0x0fff			
+#define	RS_RS_MASK	0xf000			
+#define	RS_RS_SHIFT	12
+#define	RS_RS_DISABLED	0x0000			
+#define	RS_RS_ACTIVE	0x1000			
+#define	RS_RS_IDLE	0x2000			
+#define	RS_RS_STOPPED	0x3000			
+#define	RS_RE_MASK	0xf0000			
+#define	RS_RE_SHIFT	16
+#define	RS_RE_NOERR	0x00000			
+#define	RS_RE_DPE	0x10000			
+#define	RS_RE_DFO	0x20000			
+#define	RS_RE_BEBW	0x30000			
+#define	RS_RE_BEDA	0x40000			
+#define	RS_AD_MASK	0xfff00000		
+#define	RS_AD_SHIFT	20
+
+
+#define	FA_OFF_MASK	0xffff			
+#define	FA_SEL_MASK	0xf0000			
+#define	FA_SEL_SHIFT	16
+#define	FA_SEL_XDD	0x00000			
+#define	FA_SEL_XDP	0x10000			
+#define	FA_SEL_RDD	0x40000			
+#define	FA_SEL_RDP	0x50000			
+#define	FA_SEL_XFD	0x80000			
+#define	FA_SEL_XFP	0x90000			
+#define	FA_SEL_RFD	0xc0000			
+#define	FA_SEL_RFP	0xd0000			
+#define	FA_SEL_RSD	0xe0000			
+#define	FA_SEL_RSP	0xf0000			
+
+
+#define	CTRL_BC_MASK	0x00001fff		
+#define	CTRL_AE		((uint32)3 << 16)	
+#define	CTRL_AE_SHIFT	16
+#define	CTRL_PARITY	((uint32)3 << 18)	
+#define	CTRL_EOT	((uint32)1 << 28)	
+#define	CTRL_IOC	((uint32)1 << 29)	
+#define	CTRL_EOF	((uint32)1 << 30)	
+#define	CTRL_SOF	((uint32)1 << 31)	
+
+
+#define	CTRL_CORE_MASK	0x0ff00000
+
+
+
+
+typedef volatile struct {
+	uint32	control;		
+	uint32	ptr;			
+	uint32	addrlow;		
+	uint32	addrhigh;		
+	uint32	status0;		
+	uint32	status1;		
+} dma64regs_t;
+
+typedef volatile struct {
+	dma64regs_t	tx;		
+	dma64regs_t	rx;		
+} dma64regp_t;
+
+typedef volatile struct {		
+	uint32	fifoaddr;		
+	uint32	fifodatalow;		
+	uint32	fifodatahigh;		
+	uint32	pad;			
+} dma64diag_t;
+
+
+typedef volatile struct {
+	uint32	ctrl1;		
+	uint32	ctrl2;		
+	uint32	addrlow;	
+	uint32	addrhigh;	
+} dma64dd_t;
+
+
+#define D64RINGALIGN_BITS	13
+#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
+#define	D64RINGALIGN		(1 << D64RINGALIGN_BITS)
+
+#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
+
+
+#define D64_DEF_USBBURSTLEN		2
+#define D64_DEF_SDIOBURSTLEN	1
+
+
+#define	D64_XC_XE		0x00000001	
+#define	D64_XC_SE		0x00000002	
+#define	D64_XC_LE		0x00000004	
+#define	D64_XC_FL		0x00000010	
+#define	D64_XC_PD		0x00000800	
+#define	D64_XC_AE		0x00030000	
+#define	D64_XC_AE_SHIFT		16
+#define D64_XC_BL_MASK	0x001C0000	
+#define D64_XC_BL_SHIFT		18
+
+
+#define	D64_XP_LD_MASK		0x00000fff	
+
+
+#define	D64_XS0_CD_MASK		0x00001fff	
+#define	D64_XS0_XS_MASK		0xf0000000     	
+#define	D64_XS0_XS_SHIFT		28
+#define	D64_XS0_XS_DISABLED	0x00000000	
+#define	D64_XS0_XS_ACTIVE	0x10000000	
+#define	D64_XS0_XS_IDLE		0x20000000	
+#define	D64_XS0_XS_STOPPED	0x30000000	
+#define	D64_XS0_XS_SUSP		0x40000000	
+
+#define	D64_XS1_AD_MASK		0x00001fff	
+#define	D64_XS1_XE_MASK		0xf0000000     	
+#define	D64_XS1_XE_SHIFT		28
+#define	D64_XS1_XE_NOERR	0x00000000	
+#define	D64_XS1_XE_DPE		0x10000000	
+#define	D64_XS1_XE_DFU		0x20000000	
+#define	D64_XS1_XE_DTE		0x30000000	
+#define	D64_XS1_XE_DESRE	0x40000000	
+#define	D64_XS1_XE_COREE	0x50000000	
+
+
+#define	D64_RC_RE		0x00000001	
+#define	D64_RC_RO_MASK		0x000000fe	
+#define	D64_RC_RO_SHIFT		1
+#define	D64_RC_FM		0x00000100	
+#define	D64_RC_SH		0x00000200	
+#define	D64_RC_OC		0x00000400	
+#define	D64_RC_PD		0x00000800	
+#define	D64_RC_AE		0x00030000	
+#define	D64_RC_AE_SHIFT		16
+#define D64_RC_BL_MASK	0x001C0000	
+#define D64_RC_BL_SHIFT		18
+
+
+#define DMA_CTRL_PEN		(1 << 0)	
+#define DMA_CTRL_ROC		(1 << 1)	
+#define DMA_CTRL_RXMULTI	(1 << 2)	
+#define DMA_CTRL_UNFRAMED	(1 << 3)	
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+
+
+#define	D64_RP_LD_MASK		0x00000fff	
+
+
+#define	D64_RS0_CD_MASK		0x00001fff	
+#define	D64_RS0_RS_MASK		0xf0000000     	
+#define	D64_RS0_RS_SHIFT		28
+#define	D64_RS0_RS_DISABLED	0x00000000	
+#define	D64_RS0_RS_ACTIVE	0x10000000	
+#define	D64_RS0_RS_IDLE		0x20000000	
+#define	D64_RS0_RS_STOPPED	0x30000000	
+#define	D64_RS0_RS_SUSP		0x40000000	
+
+#define	D64_RS1_AD_MASK		0x0001ffff	
+#define	D64_RS1_RE_MASK		0xf0000000     	
+#define	D64_RS1_RE_SHIFT		28
+#define	D64_RS1_RE_NOERR	0x00000000	
+#define	D64_RS1_RE_DPO		0x10000000	
+#define	D64_RS1_RE_DFU		0x20000000	
+#define	D64_RS1_RE_DTE		0x30000000	
+#define	D64_RS1_RE_DESRE	0x40000000	
+#define	D64_RS1_RE_COREE	0x50000000	
+
+
+#define	D64_FA_OFF_MASK		0xffff		
+#define	D64_FA_SEL_MASK		0xf0000		
+#define	D64_FA_SEL_SHIFT	16
+#define	D64_FA_SEL_XDD		0x00000		
+#define	D64_FA_SEL_XDP		0x10000		
+#define	D64_FA_SEL_RDD		0x40000		
+#define	D64_FA_SEL_RDP		0x50000		
+#define	D64_FA_SEL_XFD		0x80000		
+#define	D64_FA_SEL_XFP		0x90000		
+#define	D64_FA_SEL_RFD		0xc0000		
+#define	D64_FA_SEL_RFP		0xd0000		
+#define	D64_FA_SEL_RSD		0xe0000		
+#define	D64_FA_SEL_RSP		0xf0000		
+
+
+#define D64_CTRL_COREFLAGS	0x0ff00000	
+#define	D64_CTRL1_EOT		((uint32)1 << 28)	
+#define	D64_CTRL1_IOC		((uint32)1 << 29)	
+#define	D64_CTRL1_EOF		((uint32)1 << 30)	
+#define	D64_CTRL1_SOF		((uint32)1 << 31)	
+
+
+#define	D64_CTRL2_BC_MASK	0x00007fff	
+#define	D64_CTRL2_AE		0x00030000	
+#define	D64_CTRL2_AE_SHIFT	16
+#define D64_CTRL2_PARITY	0x00040000      
+
+
+#define	D64_CTRL_CORE_MASK	0x0ff00000
+
+#define D64_RX_FRM_STS_LEN	0x0000ffff	
+#define D64_RX_FRM_STS_OVFL	0x00800000	
+#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	
+#define D64_RX_FRM_STS_DATATYPE	0xf0000000	
+
+
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} dma_rxh_t;
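+
+/*
+ * Illustrative sketch, not part of the original header: a 64-bit descriptor
+ * is built by packing the frame flags into ctrl1, the byte count into ctrl2
+ * and the buffer's physical address into addrlow/addrhigh.  The helper name,
+ * the reading of SOF/EOF/IOC as start-of-frame / end-of-frame / interrupt-on-
+ * completion, and the uint32 typedef (from the driver's typedefs.h) are
+ * assumptions made only for this example.
+ */
+static inline void
+dma64_fill_dd_example(dma64dd_t *dd, uint32 pa_low, uint32 pa_high,
+	uint32 len, int want_irq)
+{
+	uint32 ctrl1 = D64_CTRL1_SOF | D64_CTRL1_EOF;	/* single-buffer frame */
+
+	if (want_irq)
+		ctrl1 |= D64_CTRL1_IOC;			/* interrupt on completion */
+
+	dd->addrlow  = pa_low;
+	dd->addrhigh = pa_high;
+	dd->ctrl2    = len & D64_CTRL2_BC_MASK;		/* byte count */
+	dd->ctrl1    = ctrl1;
+}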
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 0000000..aba914b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,109 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h,v 13.48.12.6 2010-11-04 09:39:42 Exp $
+ */
+
+
+#ifndef	_SBPCMCIA_H
+#define	_SBPCMCIA_H
+
+
+
+
+#define	PCMCIA_FCR		(0x700 / 2)
+
+#define	FCR0_OFF		0
+#define	FCR1_OFF		(0x40 / 2)
+#define	FCR2_OFF		(0x80 / 2)
+#define	FCR3_OFF		(0xc0 / 2)
+
+#define	PCMCIA_FCR0		(0x700 / 2)
+#define	PCMCIA_FCR1		(0x740 / 2)
+#define	PCMCIA_FCR2		(0x780 / 2)
+#define	PCMCIA_FCR3		(0x7c0 / 2)
+
+
+
+#define	PCMCIA_COR		0
+
+#define	COR_RST			0x80
+#define	COR_LEV			0x40
+#define	COR_IRQEN		0x04
+#define	COR_BLREN		0x01
+#define	COR_FUNEN		0x01
+
+
+#define	PCICIA_FCSR		(2 / 2)
+#define	PCICIA_PRR		(4 / 2)
+#define	PCICIA_SCR		(6 / 2)
+#define	PCICIA_ESR		(8 / 2)
+
+
+#define PCM_MEMOFF		0x0000
+#define F0_MEMOFF		0x1000
+#define F1_MEMOFF		0x2000
+#define F2_MEMOFF		0x3000
+#define F3_MEMOFF		0x4000
+
+
+#define MEM_ADDR0		(0x728 / 2)
+#define MEM_ADDR1		(0x72a / 2)
+#define MEM_ADDR2		(0x72c / 2)
+
+
+#define PCMCIA_ADDR0		(0x072e / 2)
+#define PCMCIA_ADDR1		(0x0730 / 2)
+#define PCMCIA_ADDR2		(0x0732 / 2)
+
+#define MEM_SEG			(0x0734 / 2)
+#define SROM_CS			(0x0736 / 2)
+#define SROM_DATAL		(0x0738 / 2)
+#define SROM_DATAH		(0x073a / 2)
+#define SROM_ADDRL		(0x073c / 2)
+#define SROM_ADDRH		(0x073e / 2)
+#define	SROM_INFO2		(0x0772 / 2)	
+#define	SROM_INFO		(0x07be / 2)	
+
+
+#define SROM_IDLE		0
+#define SROM_WRITE		1
+#define SROM_READ		2
+#define SROM_WEN		4
+#define SROM_WDS		7
+#define SROM_DONE		8
+
+
+#define	SRI_SZ_MASK		0x03
+#define	SRI_BLANK		0x04
+#define	SRI_OTP			0x80
+
+
+
+#define SBTML_INT_ACK		0x40000		
+#define SBTML_INT_EN		0x20000		
+
+
+#define SBTMH_INT_STATUS	0x40000		
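+
+/*
+ * Illustrative sketch, not part of the original header: the register offsets
+ * above are 16-bit word indexes (hence the "/ 2" in their definitions), so
+ * the byte offset into attribute space is the index times two.  The helper
+ * name is an assumption made only for this example.
+ */
+static inline unsigned int
+pcmcia_byte_offset_example(unsigned int word_index)
+{
+	return word_index * 2;	/* e.g. PCMCIA_FCR0 -> 0x700 */
+}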
+
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 0000000..4280d5b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,166 @@
+/*
+ * SDIO device core hardware definitions.
+ * SDIO is a portion of the PCMCIA core in core revs 3 - 8.
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h,v 13.34 2009-03-11 20:27:16 Exp $
+ */
+
+#ifndef	_SBSDIO_H
+#define	_SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION		3	/* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS			0x10000		/* sprom command and status */
+#define SBSDIO_SPROM_INFO		0x10001		/* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW		0x10002		/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH		0x10003 	/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW		0x10004		/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH		0x10005		/* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA		0x10006		/* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN		0x10007		/* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK		0x10008		/* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL		0x10009		/* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW		0x1000A		/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID		0x1000B		/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH		0x1000C		/* SB Address Window High (b31:b24)    */
+#define SBSDIO_FUNC1_FRAMECTRL		0x1000D		/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR		0x1000E		/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 	0x1000F		/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO		0x10019		/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI		0x1001A		/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO		0x1001B		/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI		0x1001C		/* Read Frame Byte Count High */
+
+#define SBSDIO_FUNC1_MISC_REG_START	0x10000 	/* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT	0x1001C 	/* f1 misc register end */
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE		0
+#define SBSDIO_SPROM_WRITE		1
+#define SBSDIO_SPROM_READ		2
+#define SBSDIO_SPROM_WEN		4
+#define SBSDIO_SPROM_WDS		7
+#define SBSDIO_SPROM_DONE		8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK			0x03		/* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK			0x04		/* deprecated in corerev 6 */
+#define	SROM_OTP			0x80		/* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL		0x01		/* or'd with onchip xtal_pu,
+							 * 1: power on oscillator
+							 * (for 4318 only)
+							 */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK		0x7f		/* number of words - 1 for sd device
+							 * to wait before sending data to host
+							 */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY		0x01		/* 1: device will assert busy signal when
+							 * receiving CMD53
+							 */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02		/* 1: assertion of sdio interrupt is
+							 * synchronous to the sdio clock
+							 */
+#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04		/* 1: mask all interrupts to host
+							 * except the chipActive (rev 8)
+							 */
+#define SBSDIO_DEVCTL_PADS_ISO		0x08		/* 1: isolate internal sdio signals, put
+							 * external pads in tri-state; requires
+							 * sdio bus power cycle to clear (rev 9)
+							 */
+#define SBSDIO_DEVCTL_SB_RST_CTL	0x30		/* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_RST_CORECTL	0x00		/*   Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_BPRESET	0x10		/*   Force backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20		/*   Force no backplane reset */
+
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP		0x01		/* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT			0x02		/* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP		0x04		/* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ		0x08		/* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ		0x10		/* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20		/* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL		0x40		/* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL			0x80		/* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL		0x40
+#define SBSDIO_Rev8_ALP_AVAIL		0x80
+
+#define SBSDIO_AVBITS			(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)		((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)		(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)		(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly)	(SBSDIO_ALPAV(regval) && \
+					(alponly ? 1 : SBSDIO_HTAV(regval)))
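+
+/*
+ * Illustrative sketch, not part of the original header: a host typically
+ * writes SBSDIO_HT_AVAIL_REQ (or SBSDIO_ALP_AVAIL_REQ) to the function-1
+ * SBSDIO_FUNC1_CHIPCLKCSR register and then polls the same register with
+ * SBSDIO_CLKAV() until the clock is reported ready.  The sdioh_rd8() and
+ * sdioh_wr8() accessors are hypothetical stand-ins for the bus driver's
+ * register access routines and are not part of this header.
+ */
+extern unsigned char sdioh_rd8(unsigned int addr);		/* hypothetical */
+extern void sdioh_wr8(unsigned int addr, unsigned char val);	/* hypothetical */
+
+static inline void
+sdio_request_ht_example(void)
+{
+	unsigned char csr;
+
+	sdioh_wr8(SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ);
+	do {
+		csr = sdioh_rd8(SBSDIO_FUNC1_CHIPCLKCSR);
+	} while (!SBSDIO_CLKAV(csr, 0));	/* 0: wait for HT, not just ALP */
+}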
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0		0x01		/* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1		0x02		/* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2		0x04		/* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD		0x08		/* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL		0x0f		/* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF		/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT	0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000		/* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK		0x80		/* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK		0xff		/* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK		0xffU		/* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK		0xffff8000	/* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON		0x1000		/* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT		0x200		/* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT       0x078           /* maximum bytes in OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF		/* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN	6		/* manfid tuple length, include tuple,
+							 * link bytes
+							 */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET		0x8		/* 8 control bytes first, CIS starts from
+							 * 8th byte
+							 */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX	64		/* sdio byte mode: maximum length of one
+							 * data command
+							 */
+
+#define SBSDIO_CORE_ADDR_MASK		0x1FFFF		/* sdio core function one address mask */
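+
+/*
+ * Illustrative sketch, not part of the original header: a 32-bit backplane
+ * address is split into a 32 KB window, programmed through the three
+ * SBSDIO_FUNC1_SBADDR{LOW,MID,HIGH} registers, plus an offset of at most 15
+ * bits that is used for the actual function-1 access.  The struct and helper
+ * names are assumptions made only for this example; only the SBSDIO_*
+ * constants come from this file.
+ */
+struct sb_window_example {
+	unsigned char addrlow;	/* value for SBSDIO_FUNC1_SBADDRLOW  */
+	unsigned char addrmid;	/* value for SBSDIO_FUNC1_SBADDRMID  */
+	unsigned char addrhigh;	/* value for SBSDIO_FUNC1_SBADDRHIGH */
+	unsigned int offset;	/* offset within the 32 KB window    */
+};
+
+static inline void
+sb_split_address_example(unsigned int sbaddr, struct sb_window_example *w)
+{
+	unsigned int base = sbaddr & SBSDIO_SBWINDOW_MASK;
+
+	w->addrlow  = (base >> 8)  & SBSDIO_SBADDRLOW_MASK;
+	w->addrmid  = (base >> 16) & SBSDIO_SBADDRMID_MASK;
+	w->addrhigh = (base >> 24) & SBSDIO_SBADDRHIGH_MASK;
+	w->offset   = sbaddr & SBSDIO_SB_OFT_ADDR_MASK;
+}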
+
+#endif	/* _SBSDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 0000000..107a8b0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,293 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h,v 13.38 2009-09-22 22:56:45 Exp $
+ */
+
+#ifndef	_sbsdpcmdev_h_
+#define	_sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	dma64regs_t	xmt;		/* dma tx */
+	uint32 PAD[2];
+	dma64regs_t	rcv;		/* dma rx */
+	uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+	dma64p_t dma64regs[2];
+	dma64diag_t dmafifo;		/* DMA Diagnostic Regs, 0x280-0x28c */
+	uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+	dma32regp_t dma32regs[2];	/* dma tx & rx, 0x200-0x23c */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x240-0x24c */
+	uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+	dma32regp_t dmaregs;		/* DMA Regs, 0x200-0x21c, rev8 */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x220-0x22c */
+	uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+	uint32 corecontrol;		/* CoreControl, 0x000, rev8 */
+	uint32 corestatus;		/* CoreStatus, 0x004, rev8  */
+	uint32 PAD[1];
+	uint32 biststatus;		/* BistStatus, 0x00c, rev8  */
+
+	/* PCMCIA access */
+	uint16 pcmciamesportaladdr;	/* PcmciaMesPortalAddr, 0x010, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciamesportalmask;	/* PcmciaMesPortalMask, 0x014, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciawrframebc;		/* PcmciaWrFrameBC, 0x018, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciaunderflowtimer;	/* PcmciaUnderflowTimer, 0x01c, rev8   */
+	uint16 PAD[1];
+
+	/* interrupt */
+	uint32 intstatus;		/* IntStatus, 0x020, rev8   */
+	uint32 hostintmask;		/* IntHostMask, 0x024, rev8   */
+	uint32 intmask;			/* IntSbMask, 0x028, rev8   */
+	uint32 sbintstatus;		/* SBIntStatus, 0x02c, rev8   */
+	uint32 sbintmask;		/* SBIntMask, 0x030, rev8   */
+	uint32 funcintmask;		/* SDIO Function Interrupt Mask, SDIO rev4 */
+	uint32 PAD[2];
+	uint32 tosbmailbox;		/* ToSBMailbox, 0x040, rev8   */
+	uint32 tohostmailbox;		/* ToHostMailbox, 0x044, rev8   */
+	uint32 tosbmailboxdata;		/* ToSbMailboxData, 0x048, rev8   */
+	uint32 tohostmailboxdata;	/* ToHostMailboxData, 0x04c, rev8   */
+
+	/* synchronized access to registers in SDIO clock domain */
+	uint32 sdioaccess;		/* SdioAccess, 0x050, rev8   */
+	uint32 PAD[3];
+
+	/* PCMCIA frame control */
+	uint8 pcmciaframectrl;		/* pcmciaFrameCtrl, 0x060, rev8   */
+	uint8 PAD[3];
+	uint8 pcmciawatermark;		/* pcmciaWaterMark, 0x064, rev8   */
+	uint8 PAD[155];
+
+	/* interrupt batching control */
+	uint32 intrcvlazy;		/* IntRcvLazy, 0x100, rev8 */
+	uint32 PAD[3];
+
+	/* counters */
+	uint32 cmd52rd;			/* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+	uint32 cmd52wr;			/* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+	uint32 cmd53rd;			/* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+	uint32 cmd53wr;			/* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+	uint32 abort;			/* AbortCount, 0x120, rev8, SDIO: aborts */
+	uint32 datacrcerror;		/* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+	uint32 rdoutofsync;		/* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+	uint32 writebusy;		/* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+	uint32 readwait;		/* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+	uint32 readterm;		/* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+	uint32 writeterm;		/* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+	uint32 PAD[40];
+	uint32 clockctlstatus;		/* ClockCtlStatus, 0x1e0, rev8 */
+	uint32 PAD[7];
+
+	/* DMA engines */
+	volatile union {
+		pcmdma32_t pcm32;
+		sdiodma32_t sdiod32;
+		sdiodma64_t sdiod64;
+	} dma;
+
+	/* SDIO/PCMCIA CIS region */
+	char cis[512];			/* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+	/* PCMCIA function control registers */
+	char pcmciafcr[256];		/* PCMCIA FCR, 0x600-6ff, rev6 */
+	uint16 PAD[55];
+
+	/* PCMCIA backplane access */
+	uint16 backplanecsr;		/* BackplaneCSR, 0x76E, rev6 */
+	uint16 backplaneaddr0;		/* BackplaneAddr0, 0x770, rev6 */
+	uint16 backplaneaddr1;		/* BackplaneAddr1, 0x772, rev6 */
+	uint16 backplaneaddr2;		/* BackplaneAddr2, 0x774, rev6 */
+	uint16 backplaneaddr3;		/* BackplaneAddr3, 0x776, rev6 */
+	uint16 backplanedata0;		/* BackplaneData0, 0x778, rev6 */
+	uint16 backplanedata1;		/* BackplaneData1, 0x77a, rev6 */
+	uint16 backplanedata2;		/* BackplaneData2, 0x77c, rev6 */
+	uint16 backplanedata3;		/* BackplaneData3, 0x77e, rev6 */
+	uint16 PAD[31];
+
+	/* sprom "size" & "blank" info */
+	uint16 spromstatus;		/* SPROMStatus, 0x7BE, rev2 */
+	uint32 PAD[464];
+
+	/* Sonics SiliconBackplane registers */
+	sbconfig_t sbconfig;		/* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY		(1 << 0)	/* CIS Ready */
+#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal causes backplane reset */
+#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
+#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE	(1 << 4)	/* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL	(1 << 5)	/* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE	(1 << 0)	/* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV	(1 << 1)	/* 1=smartDev enabled */
+#define CS_F2ENABLED	(1 << 2)	/* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK	0x7fff	/* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK	0x7fff	/* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK	0xffff	/* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK		0x07ff	/* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
+#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
+#define	I_PC		(1 << 10)	/* descriptor error */
+#define	I_PD		(1 << 11)	/* data error */
+#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
+#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
+#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
+#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
+#define	I_RI		(1 << 16)	/* Receive Interrupt */
+#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
+#define	I_XI		(1 << 24)	/* Transmit Interrupt */
+#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
+#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
+#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
+#define I_CHIPACTIVE	(1 << 29)	/* chip transitioned from doze to active state */
+#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
+#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
+#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)	/* DMA Errors */
+#define I_DMA		(I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR	(1 << 8)	/* Backplane SError (write) */
+#define I_SB_RESPERR	(1 << 9)	/* Backplane Response Error (read) */
+#define I_SB_SPROMERR	(1 << 10)	/* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK	0x000000ff	/* Read/Write Data Mask */
+#define SDA_ADDR_MASK	0x000fff00	/* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT	8		/* Read/Write Address Shift */
+#define SDA_WRITE	0x01000000	/* Write bit  */
+#define SDA_READ	0x00000000	/* Write bit cleared for Read */
+#define SDA_BUSY	0x80000000	/* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE		0x000	/* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE	0x100	/* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE	0x200	/* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE	0x300	/* sdioAccess F1 core-specific register space */
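+
+/*
+ * Illustrative sketch, not part of the original header: how the SDA_* fields
+ * above pack into a single sdioaccess request word.  Writing the word to the
+ * sdioaccess register and polling SDA_BUSY for completion are left to the
+ * core driver; only the field packing is shown here.
+ */
+static inline uint32
+sdioaccess_write_word(uint32 addr, uint8 data)
+{
+	/* address in bits 19:8, data in bits 7:0, SDA_WRITE selects a write */
+	return SDA_WRITE | ((addr << SDA_ADDR_SHIFT) & SDA_ADDR_MASK) |
+	       ((uint32)data & SDA_DATA_MASK);
+}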
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA	0x006	/* ChipControlData */
+#define SDA_CHIPCONTROLENAB	0x007	/* ChipControlEnable */
+#define SDA_F2WATERMARK		0x008	/* Function 2 Watermark */
+#define SDA_DEVICECONTROL	0x009	/* DeviceControl */
+#define SDA_SBADDRLOW		0x00a	/* SbAddrLow */
+#define SDA_SBADDRMID		0x00b	/* SbAddrMid */
+#define SDA_SBADDRHIGH		0x00c	/* SbAddrHigh */
+#define SDA_FRAMECTRL		0x00d	/* FrameCtrl */
+#define SDA_CHIPCLOCKCSR	0x00e	/* ChipClockCSR */
+#define SDA_SDIOPULLUP		0x00f	/* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW	0x019	/* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH	0x01a	/* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW	0x01b	/* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH	0x01c	/* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK	0x7f	/* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK	0x80	/* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK	0xff	/* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK	0xff	/* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+#define SFC_CRC4WOOS	(1 << 2)	/* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL	(1 << 3)	/* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define PFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+
+/* intrcvlazy */
+#define	IRL_TO_MASK	0x00ffffff	/* timeout */
+#define	IRL_FC_MASK	0xff000000	/* frame count */
+#define	IRL_FC_SHIFT	24		/* frame count */
+
+/* rx header */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC		0x0001		/* CRC error detected */
+#define RXF_WOOS	0x0002		/* write frame out of sync */
+#define RXF_WF_TERM	0x0004		/* write frame terminated */
+#define RXF_ABORT	0x0008		/* write frame aborted */
+#define RXF_DISCARD	(RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT)	/* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN	4	/* HW frametag: 2 bytes len, 2 bytes check val */
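+
+/*
+ * Illustrative sketch, not part of the original header.  Assumption: the
+ * 2-byte check value in the hardware frame tag is the bitwise complement of
+ * the 2-byte length; verify against the host driver before relying on this.
+ */
+static inline int
+sdpcm_frametag_valid(uint16 len, uint16 check)
+{
+	return ((uint16)(len ^ check)) == 0xffff;
+}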
+
+#endif	/* _sbsdpcmdev_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 0000000..1cba422
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,186 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h,v 13.15 2009-10-02 16:55:44 Exp $
+ */
+
+
+#ifndef	_SBSOCRAM_H
+#define	_SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct sbsocramregs {
+	uint32	coreinfo;
+	uint32	bwalloc;
+	uint32	extracoreinfo;
+	uint32	biststat;
+	uint32	bankidx;
+	uint32	standbyctrl;
+
+	uint32	errlogstatus;	
+	uint32	errlogaddr;	
+	
+	uint32	cambankidx;
+	uint32	cambankstandbyctrl;
+	uint32	cambankpatchctrl;
+	uint32	cambankpatchtblbaseaddr;
+	uint32	cambankcmdreg;
+	uint32	cambankdatareg;
+	uint32	cambankmaskreg;
+	uint32	PAD[1];
+	uint32	bankinfo;	
+	uint32	PAD[15];
+	uint32	extmemconfig;
+	uint32	extmemparitycsr;
+	uint32	extmemparityerrdata;
+	uint32	extmemparityerrcnt;
+	uint32	extmemwrctrlandsize;
+	uint32	PAD[84];
+	uint32	workaround;
+	uint32	pwrctl;		
+	uint32  PAD[133];
+	uint32  sr_control;     
+	uint32  sr_status;      
+	uint32  sr_address;     
+	uint32  sr_data;        
+} sbsocramregs_t;
+
+#endif	/* _LANGUAGE_ASSEMBLY */
+
+
+#define	SR_COREINFO		0x00
+#define	SR_BWALLOC		0x04
+#define	SR_BISTSTAT		0x0c
+#define	SR_BANKINDEX		0x10
+#define	SR_BANKSTBYCTL		0x14
+#define SR_PWRCTL		0x1e8
+
+
+#define	SRCI_PT_MASK		0x00070000	
+#define	SRCI_PT_SHIFT		16
+
+#define SRCI_PT_OCP_OCP		0
+#define SRCI_PT_AXI_OCP		1
+#define SRCI_PT_ARM7AHB_OCP	2
+#define SRCI_PT_CM3AHB_OCP	3
+#define SRCI_PT_AXI_AXI		4
+#define SRCI_PT_AHB_AXI		5
+
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define SRCI_LRS_MASK		0x0f000000
+#define SRCI_LRS_SHIFT		24
+
+
+#define	SRCI_MS0_MASK		0xf
+#define SR_MS0_BASE		16
+
+
+#define	SRCI_ROMNB_MASK		0xf000
+#define	SRCI_ROMNB_SHIFT	12
+#define	SRCI_ROMBSZ_MASK	0xf00
+#define	SRCI_ROMBSZ_SHIFT	8
+#define	SRCI_SRNB_MASK		0xf0
+#define	SRCI_SRNB_SHIFT		4
+#define	SRCI_SRBSZ_MASK		0xf
+#define	SRCI_SRBSZ_SHIFT	0
+
+#define SR_BSZ_BASE		14
+
+
+#define	SRSC_SBYOVR_MASK	0x80000000
+#define	SRSC_SBYOVR_SHIFT	31
+#define	SRSC_SBYOVRVAL_MASK	0x60000000
+#define	SRSC_SBYOVRVAL_SHIFT	29
+#define	SRSC_SBYEN_MASK		0x01000000	
+#define	SRSC_SBYEN_SHIFT	24
+
+
+#define SRPC_PMU_STBYDIS_MASK	0x00000010	
+#define SRPC_PMU_STBYDIS_SHIFT	4
+#define SRPC_STBYOVRVAL_MASK	0x00000008
+#define SRPC_STBYOVRVAL_SHIFT	3
+#define SRPC_STBYOVR_MASK	0x00000007
+#define SRPC_STBYOVR_SHIFT	0
+
+
+#define SRECC_NUM_BANKS_MASK   0x000000F0
+#define SRECC_NUM_BANKS_SHIFT  4
+#define SRECC_BANKSIZE_MASK    0x0000000F
+#define SRECC_BANKSIZE_SHIFT   0
+
+#define SRECC_BANKSIZE(value)	 (1 << (value))
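+/* usage note (not in the original header): SRECC_BANKSIZE(3) expands to
+ * (1 << 3) == 8; the unit of that size is defined by the external-memory
+ * parity logic, not by this header.
+ */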
+
+
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS   0x0001FFFC
+#define SRP_VALID     0x8000
+
+
+#define SRCMD_WRITE  0x00020000
+#define SRCMD_READ   0x00010000
+#define SRCMD_DONE   0x80000000
+
+#define SRCMD_DONE_DLY	1000
+
+
+#define SOCRAM_BANKINFO_SZMASK		0x3f
+#define SOCRAM_BANKIDX_ROM_MASK		0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
+
+#define SOCRAM_MEMTYPE_RAM		0
+#define SOCRAM_MEMTYPE_R0M		1
+#define SOCRAM_MEMTYPE_DEVRAM		2
+
+#define	SOCRAM_BANKINFO_REG		0x40
+#define	SOCRAM_BANKIDX_REG		0x10
+#define	SOCRAM_BANKINFO_STDBY_MASK	0x400
+#define	SOCRAM_BANKINFO_STDBY_TIMER	0x800
+
+
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT	13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK	0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT	14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK	0x4000
+
+
+#define SOCRAM_DEVRAMBANK_MASK		0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT		12
+
+
+#define	SOCRAM_BANKINFO_SZBASE		8192
+#define SOCRAM_BANKSIZE_SHIFT		13	
+
+
+#endif	/* _SBSOCRAM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 0000000..7e94e7d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,606 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h,v 13.27.14.1 2010-09-07 13:37:45 Exp $
+ */
+
+#ifndef	_SDIO_H
+#define	_SDIO_H
+
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+	uint8	cccr_sdio_rev;		/* RO, cccr and sdio revision */
+	uint8	sd_rev;			/* RO, sd spec revision */
+	uint8	io_en;			/* I/O enable */
+	uint8	io_rdy;			/* I/O ready reg */
+	uint8	intr_ctl;		/* Master and per function interrupt enable control */
+	uint8	intr_status;		/* RO, interrupt pending status */
+	uint8	io_abort;		/* read/write abort or reset all functions */
+	uint8	bus_inter;		/* bus interface control */
+	uint8	capability;		/* RO, card capability */
+
+	uint8	cis_base_low;		/* 0x9 RO, common CIS base address, LSB */
+	uint8	cis_base_mid;
+	uint8	cis_base_high;		/* 0xB RO, common CIS base address, MSB */
+
+	/* suspend/resume registers */
+	uint8	bus_suspend;		/* 0xC */
+	uint8	func_select;		/* 0xD */
+	uint8	exec_flag;		/* 0xE */
+	uint8	ready_flag;		/* 0xF */
+
+	uint8	fn0_blk_size[2];	/* 0x10(LSB), 0x11(MSB) */
+
+	uint8	power_control;		/* 0x12 (SDIO version 1.10) */
+
+	uint8	speed_control;		/* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV			0x00
+#define SDIOD_CCCR_SDREV		0x01
+#define SDIOD_CCCR_IOEN			0x02
+#define SDIOD_CCCR_IORDY		0x03
+#define SDIOD_CCCR_INTEN		0x04
+#define SDIOD_CCCR_INTPEND		0x05
+#define SDIOD_CCCR_IOABORT		0x06
+#define SDIOD_CCCR_BICTRL		0x07
+#define SDIOD_CCCR_CAPABLITIES		0x08
+#define SDIOD_CCCR_CISPTR_0		0x09
+#define SDIOD_CCCR_CISPTR_1		0x0A
+#define SDIOD_CCCR_CISPTR_2		0x0B
+#define SDIOD_CCCR_BUSSUSP		0x0C
+#define SDIOD_CCCR_FUNCSEL		0x0D
+#define SDIOD_CCCR_EXECFLAGS		0x0E
+#define SDIOD_CCCR_RDYFLAGS		0x0F
+#define SDIOD_CCCR_BLKSIZE_0		0x10
+#define SDIOD_CCCR_BLKSIZE_1		0x11
+#define SDIOD_CCCR_POWER_CONTROL	0x12
+#define SDIOD_CCCR_SPEED_CONTROL	0x13
+#define SDIOD_CCCR_UHSI_SUPPORT		0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH	0x15
+#define SDIOD_CCCR_INTR_EXTN		0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_SEPINT		0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK	0xf0	/* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK	0x0f	/* CCCR format version number */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK		0x0f	/* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1	0x02	/* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2	0x04	/* function 2 I/O enable */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1	0x02	/* function 1 I/O ready */
+#define SDIO_FUNC_READY_2	0x04	/* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN	0x1	/* interrupt enable master */
+#define INTR_CTL_FUNC1_EN	0x2	/* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN	0x4	/* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1	0x2	/* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2	0x4	/* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL	0x08	/* I/O card reset */
+#define IO_ABORT_FUNC_MASK	0x07	/* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS	0x80	/* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP	0x40	/* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN	0x20	/* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK	0x03	/* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT	0x02	/* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT	0x00	/* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS		0x80	/* 4-bit support for low speed card */
+#define SDIO_CAP_LSC		0x40	/* low speed card */
+#define SDIO_CAP_E4MI		0x20	/* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI		0x10	/* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS		0x08	/* support suspend/resume */
+#define SDIO_CAP_SRW		0x04	/* support read wait */
+#define SDIO_CAP_SMB		0x02	/* support multi-block transfer */
+#define SDIO_CAP_SDC		0x01	/* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC		0x01	/* supports master power control (RO) */
+#define SDIO_POWER_EMPC		0x02	/* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS		0x01	/* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS		0x02	/* enable high-speed [clocking] mode (RW) */
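+
+/* Note (not in the original header): BITFIELD_MASK() is not defined here; it
+ * is assumed to come from the bcmdhd utility headers and to expand to a mask
+ * of the given width, i.e. ((1 << (width)) - 1).
+ */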
+
+/* for setting bus speed in the card, CCCR register 0x13 */
+#define SDIO_BUS_SPEED_UHSISEL_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S	1
+
+/* for getting bus speed capability from the card, CCCR register 0x14 */
+#define SDIO_BUS_SPEED_UHSICAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S	0
+
+/* for getting driver type capability from the card, CCCR register 0x15 */
+#define SDIO_BUS_DRVR_TYPE_CAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S	0
+
+/* for setting driver type selection in the card, CCCR register 0x15 */
+#define SDIO_BUS_DRVR_TYPE_SEL_M	BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S	4
+
+/* for getting async interrupt support from the card, CCCR register 0x16 */
+#define SDIO_BUS_ASYNCINT_CAP_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S	0
+
+/* for setting async interrupt selection in the card, CCCR register 0x16 */
+#define SDIO_BUS_ASYNCINT_SEL_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S	1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK	0x01	/* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE		0x02	/* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI	0x04	/* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+	uint8	devctr;			/* device interface, CSA control */
+	uint8	ext_dev;		/* extended standard I/O device type code */
+	uint8	pwr_sel;		/* power selection support */
+	uint8	PAD[6];			/* reserved */
+
+	uint8	cis_low;		/* CIS LSB */
+	uint8	cis_mid;
+	uint8	cis_high;		/* CIS MSB */
+	uint8	csa_low;		/* code storage area, LSB */
+	uint8	csa_mid;
+	uint8	csa_high;		/* code storage area, MSB */
+	uint8	csa_dat_win;		/* data access window to function */
+
+	uint8	fnx_blk_size[2];	/* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_IOFUNCS		7
+
+/* SDIO Device FBR Start Address  */
+#define SDIOD_FBR_STARTADDR		0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE			0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n)		((n) * 0x100)
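+/* usage note (not in the original header): SDIOD_FBR_BASE(2) evaluates to
+ * 0x200, so e.g. function 2's CIS pointer LSB sits at
+ * SDIOD_FBR_BASE(2) + SDIOD_FBR_CISPTR_0.
+ */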
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR		0x00	/* basic info for function */
+#define SDIOD_FBR_EXT_DEV		0x01	/* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL		0x02	/* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0		0x09
+#define SDIOD_FBR_CISPTR_1		0x0A
+#define SDIOD_FBR_CISPTR_2		0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0		0x0C
+#define SDIOD_FBR_CSA_ADDR_1		0x0D
+#define SDIOD_FBR_CSA_ADDR_2		0x0E
+#define SDIOD_FBR_CSA_DATA		0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0		0x10
+#define SDIOD_FBR_BLKSIZE_1		0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC	0x0f	/* device interface code */
+#define SDIOD_FBR_DECVTR_CSA	0x40	/* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN	0x80	/* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE		0	/* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART		1
+#define SDIOD_DIC_BLUETOOTH_A	2
+#define SDIOD_DIC_BLUETOOTH_B	3
+#define SDIOD_DIC_GPS		4
+#define SDIOD_DIC_CAMERA	5
+#define SDIOD_DIC_PHS		6
+#define SDIOD_DIC_WLAN		7
+#define SDIOD_DIC_EXT		0xf	/* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS	0x01	/* supports power selection */
+#define SDIOD_PWR_SEL_EPS	0x02	/* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0		0
+#define SDIO_FUNC_1		1
+#define SDIO_FUNC_2		2
+#define SDIO_FUNC_3		3
+#define SDIO_FUNC_4		4
+#define SDIO_FUNC_5		5
+#define SDIO_FUNC_6		6
+#define SDIO_FUNC_7		7
+
+#define SD_CARD_TYPE_UNKNOWN	0	/* bad type or unrecognized */
+#define SD_CARD_TYPE_IO		1	/* IO only card */
+#define SD_CARD_TYPE_MEMORY	2	/* memory only card */
+#define SD_CARD_TYPE_COMBO	3	/* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE	2048	/* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE	1	/* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE		31
+#define CARDREG_STATUS_BIT_COMCRCERROR		23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND	22
+#define CARDREG_STATUS_BIT_ERROR		19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3	12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2	11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1	10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0	9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR	4
+
+
+
+#define SD_CMD_GO_IDLE_STATE		0	/* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND		1
+#define SD_CMD_MMC_SET_RCA		3
+#define SD_CMD_IO_SEND_OP_COND		5	/* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD	7
+#define SD_CMD_SEND_CSD			9
+#define SD_CMD_SEND_CID			10
+#define SD_CMD_STOP_TRANSMISSION	12
+#define SD_CMD_SEND_STATUS		13
+#define SD_CMD_GO_INACTIVE_STATE	15
+#define SD_CMD_SET_BLOCKLEN		16
+#define SD_CMD_READ_SINGLE_BLOCK	17
+#define SD_CMD_READ_MULTIPLE_BLOCK	18
+#define SD_CMD_WRITE_BLOCK		24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK	25
+#define SD_CMD_PROGRAM_CSD		27
+#define SD_CMD_SET_WRITE_PROT		28
+#define SD_CMD_CLR_WRITE_PROT		29
+#define SD_CMD_SEND_WRITE_PROT		30
+#define SD_CMD_ERASE_WR_BLK_START	32
+#define SD_CMD_ERASE_WR_BLK_END		33
+#define SD_CMD_ERASE			38
+#define SD_CMD_LOCK_UNLOCK		42
+#define SD_CMD_IO_RW_DIRECT		52	/* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED		53	/* mandatory for SDIO */
+#define SD_CMD_APP_CMD			55
+#define SD_CMD_GEN_CMD			56
+#define SD_CMD_READ_OCR			58
+#define SD_CMD_CRC_ON_OFF		59	/* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS		13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS	22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT	23
+#define SD_ACMD_SD_SEND_OP_COND		41
+#define SD_ACMD_SET_CLR_CARD_DETECT	42
+#define SD_ACMD_SEND_SCR		51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ		0   /* Read_Write: Read */
+#define SD_IO_OP_WRITE		1   /* Read_Write: Write */
+#define SD_IO_RW_NORMAL		0   /* no read-after-write (RAW) */
+#define SD_IO_RW_RAW		1   /* read-after-write (RAW) */
+#define SD_IO_BYTE_MODE		0   /* byte mode */
+#define SD_IO_BLOCK_MODE	1   /* block mode */
+#define SD_IO_FIXED_ADDRESS	0   /* fixed address */
+#define SD_IO_INCREMENT_ADDRESS	1   /* incrementing address */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+	 (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+	 (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
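+
+/*
+ * Illustrative sketch, not part of the original header: building a CMD52
+ * single-byte write argument with the macro above.  The register offset and
+ * data value passed by a caller are placeholders; uint32/uint8 come from
+ * typedefs.h, which users of this header are assumed to include first.
+ */
+static inline uint32
+sdio_cmd52_wr_arg(uint32 regaddr, uint8 data)
+{
+	return SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, SD_IO_RW_NORMAL,
+		SDIO_FUNC_1, regaddr, data);
+}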
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE			0
+#define SD_RSP_NO_1			1
+#define SD_RSP_NO_2			2
+#define SD_RSP_NO_3			3
+#define SD_RSP_NO_4			4
+#define SD_RSP_NO_5			5
+#define SD_RSP_NO_6			6
+
+	/* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR	0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND	0x4000
+#define SD_RSP_MR6_ERROR		0x2000
+
+	/* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT			0x80
+#define SD_RSP_MR1_PARAMETER_ERROR	0x40
+#define SD_RSP_MR1_RFU5			0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR	0x10
+#define SD_RSP_MR1_COM_CRC_ERROR	0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND	0x04
+#define SD_RSP_MR1_RFU1			0x02
+#define SD_RSP_MR1_IDLE_STATE		0x01
+
+	/* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR		0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND	0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1	0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0	0x10
+#define SD_RSP_R5_ERROR			0x08
+#define SD_RSP_R5_RFU			0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR	0x02
+#define SD_RSP_R5_OUT_OF_RANGE		0x01
+
+#define SD_RSP_R5_ERRBITS		0xCB
+
+
+/* ------------------------------------------------
+ *  SDIO Commands and responses
+ *
+ *  I/O only commands are:
+ *      CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0		0
+#define SDIOH_CMD_3		3
+#define SDIOH_CMD_5		5
+#define SDIOH_CMD_7		7
+#define SDIOH_CMD_11		11
+#define SDIOH_CMD_15		15
+#define SDIOH_CMD_19		19
+#define SDIOH_CMD_52		52
+#define SDIOH_CMD_53		53
+#define SDIOH_CMD_59		59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE		0
+#define SDIOH_RSP_R1		1
+#define SDIOH_RSP_R2		2
+#define SDIOH_RSP_R3		3
+#define SDIOH_RSP_R4		4
+#define SDIOH_RSP_R5		5
+#define SDIOH_RSP_R6		6
+
+/*
+ *  SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS	0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * 	CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M		BITFIELD_MASK(24)
+#define CMD5_OCR_S		0
+
+#define CMD5_S18R_M		BITFIELD_MASK(1)
+#define CMD5_S18R_S		24
+
+#define CMD7_RCA_M		BITFIELD_MASK(16)
+#define CMD7_RCA_S		16
+
+#define CMD_15_RCA_M		BITFIELD_MASK(16)
+#define CMD_15_RCA_S		16
+
+#define CMD52_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]    - Write Data/Stuff bits of CMD52
+						   */
+#define CMD52_DATA_S		0
+#define CMD52_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD52_REG_ADDR_S	9
+#define CMD52_RAW_M		BITFIELD_MASK(1)  /* Bit  27       - Read after Write flag */
+#define CMD52_RAW_S		27
+#define CMD52_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD52_FUNCTION_S	28
+#define CMD52_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD52_RW_FLAG_S		31
+
+
+#define CMD53_BYTE_BLK_CNT_M	BITFIELD_MASK(9) /* Bits [8:0]     - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S	0
+#define CMD53_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD53_REG_ADDR_S	9
+#define CMD53_OP_CODE_M		BITFIELD_MASK(1)  /* Bit  26       - R/W Operation Code */
+#define CMD53_OP_CODE_S		26
+#define CMD53_BLK_MODE_M	BITFIELD_MASK(1)  /* Bit  27       - Block Mode */
+#define CMD53_BLK_MODE_S	27
+#define CMD53_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD53_FUNCTION_S	28
+#define CMD53_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD53_RW_FLAG_S		31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ *  -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M		BITFIELD_MASK(24) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S		0
+
+#define RSP4_S18A_M			BITFIELD_MASK(1) /* Bit  24      - S18A, 1.8V signaling accepted */
+#define RSP4_S18A_S			24
+
+#define RSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S		24
+#define RSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  27      - Memory present */
+#define RSP4_MEM_PRESENT_S	27
+#define RSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S	28
+#define RSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  31      - SDIO card ready */
+#define RSP4_CARD_READY_S	31
+
+#define RSP6_STATUS_M		BITFIELD_MASK(16) /* Bits [15:0]  - Card status bits [19,22,23,12:0]
+						   */
+#define RSP6_STATUS_S		0
+#define RSP6_IO_RCA_M		BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S		16
+
+#define RSP1_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3       - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S	3
+#define RSP1_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5       - Card expects ACMD */
+#define RSP1_APP_CMD_S		5
+#define RSP1_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8       - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S	8
+#define RSP1_CURR_STATE_M	BITFIELD_MASK(4)  /* Bits [12:9] - State of card
+						   * when Cmd was received
+						   */
+#define RSP1_CURR_STATE_S	9
+#define RSP1_EARSE_RESET_M	BITFIELD_MASK(1)  /* Bit 13   - Erase seq cleared */
+#define RSP1_EARSE_RESET_S	13
+#define RSP1_CARD_ECC_DISABLE_M	BITFIELD_MASK(1)  /* Bit 14   - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S	14
+#define RSP1_WP_ERASE_SKIP_M	BITFIELD_MASK(1)  /* Bit 15   - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S	15
+#define RSP1_CID_CSD_OVERW_M	BITFIELD_MASK(1)  /* Bit 16   - Illegal write to CID or R/O bits
+						   * of CSD
+						   */
+#define RSP1_CID_CSD_OVERW_S	16
+#define RSP1_ERROR_M		BITFIELD_MASK(1)  /* Bit 19   - General/Unknown error */
+#define RSP1_ERROR_S		19
+#define RSP1_CC_ERROR_M		BITFIELD_MASK(1)  /* Bit 20   - Internal Card Control error */
+#define RSP1_CC_ERROR_S		20
+#define RSP1_CARD_ECC_FAILED_M	BITFIELD_MASK(1)  /* Bit 21   - Card internal ECC failed
+						   * to correct data
+						   */
+#define RSP1_CARD_ECC_FAILED_S	21
+#define RSP1_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit 22   - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S	22
+#define RSP1_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 23   - CRC check of previous command failed
+						   */
+#define RSP1_COM_CRC_ERROR_S	23
+#define RSP1_LOCK_UNLOCK_FAIL_M	BITFIELD_MASK(1)  /* Bit 24   - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S	24
+#define RSP1_CARD_LOCKED_M	BITFIELD_MASK(1)  /* Bit 25   - Card locked by the host */
+#define RSP1_CARD_LOCKED_S	25
+#define RSP1_WP_VIOLATION_M	BITFIELD_MASK(1)  /* Bit 26   - Attempt to program
+						   * write-protected blocks
+						   */
+#define RSP1_WP_VIOLATION_S	26
+#define RSP1_ERASE_PARAM_M	BITFIELD_MASK(1)  /* Bit 27   - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S	27
+#define RSP1_ERASE_SEQ_ERR_M	BITFIELD_MASK(1)  /* Bit 28   - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S	28
+#define RSP1_BLK_LEN_ERR_M	BITFIELD_MASK(1)  /* Bit 29   - Block length error */
+#define RSP1_BLK_LEN_ERR_S	29
+#define RSP1_ADDR_ERR_M		BITFIELD_MASK(1)  /* Bit 30   - Misaligned address */
+#define RSP1_ADDR_ERR_S		30
+#define RSP1_OUT_OF_RANGE_M	BITFIELD_MASK(1)  /* Bit 31   - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S	31
+
+
+#define RSP5_DATA_M		BITFIELD_MASK(8)  /* Bits [0:7]   - data */
+#define RSP5_DATA_S		0
+#define RSP5_FLAGS_M		BITFIELD_MASK(8)  /* Bit  [15:8]  - Rsp flags */
+#define RSP5_FLAGS_S		8
+#define RSP5_STUFF_M		BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S		16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M	BITFIELD_MASK(16) /* Bits [15:0]    - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S	0
+#define SPIRSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [18:16]   - Stuff bits */
+#define SPIRSP4_STUFF_S		16
+#define SPIRSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  19        - Memory present */
+#define SPIRSP4_MEM_PRESENT_S	19
+#define SPIRSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [22:20]   - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S	20
+#define SPIRSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  23        - SDIO card ready */
+#define SPIRSP4_CARD_READY_S	23
+#define SPIRSP4_IDLE_STATE_M	BITFIELD_MASK(1)  /* Bit  24        - idle state */
+#define SPIRSP4_IDLE_STATE_S	24
+#define SPIRSP4_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S	26
+#define SPIRSP4_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S	27
+#define SPIRSP4_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP4_FUNC_NUM_ERROR_S	28
+#define SPIRSP4_PARAM_ERROR_M	BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S	30
+#define SPIRSP4_START_BIT_M	BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP4_START_BIT_S	31
+
+#define SPIRSP5_DATA_M			BITFIELD_MASK(8)  /* Bits [23:16]   - R/W Data */
+#define SPIRSP5_DATA_S			16
+#define SPIRSP5_IDLE_STATE_M		BITFIELD_MASK(1)  /* Bit  24        - Idle state */
+#define SPIRSP5_IDLE_STATE_S		24
+#define SPIRSP5_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S		26
+#define SPIRSP5_COM_CRC_ERROR_M		BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S		27
+#define SPIRSP5_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP5_FUNC_NUM_ERROR_S	28
+#define SPIRSP5_PARAM_ERROR_M		BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S		30
+#define SPIRSP5_START_BIT_M		BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP5_START_BIT_S		31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3	- Authentication seq error
+							   */
+#define RSP6STAT_AKE_SEQ_ERROR_S	3
+#define RSP6STAT_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5	- Card expects ACMD */
+#define RSP6STAT_APP_CMD_S		5
+#define RSP6STAT_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8	- Ready for data
+							   * (buff empty)
+							   */
+#define RSP6STAT_READY_FOR_DATA_S	8
+#define RSP6STAT_CURR_STATE_M		BITFIELD_MASK(4)  /* Bits [12:9] - Card state at
+							   * Cmd reception
+							   */
+#define RSP6STAT_CURR_STATE_S		9
+#define RSP6STAT_ERROR_M		BITFIELD_MASK(1)  /* Bit 13  - General/Unknown error Bit 19
+							   */
+#define RSP6STAT_ERROR_S		13
+#define RSP6STAT_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit 14  - Illegal cmd for
+							   * card state Bit 22
+							   */
+#define RSP6STAT_ILLEGAL_CMD_S		14
+#define RSP6STAT_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 15  - CRC previous command
+							   * failed Bit 23
+							   */
+#define RSP6STAT_COM_CRC_ERROR_S	15
+
+#define SDIOH_XFER_TYPE_READ    SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE   SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT	0
+#define CMD_OPTION_TUNING	1
+
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h
new file mode 100644
index 0000000..3d37c7a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -0,0 +1,412 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h,v 13.17.2.3 2011-01-08 05:28:21 Exp $
+ */
+
+#ifndef	_SDIOH_H
+#define	_SDIOH_H
+
+#define SD_SysAddr			0x000
+#define SD_BlockSize			0x004
+#define SD_BlockCount 			0x006
+#define SD_Arg0				0x008
+#define SD_Arg1 			0x00A
+#define SD_TransferMode			0x00C
+#define SD_Command 			0x00E
+#define SD_Response0			0x010
+#define SD_Response1 			0x012
+#define SD_Response2			0x014
+#define SD_Response3 			0x016
+#define SD_Response4			0x018
+#define SD_Response5 			0x01A
+#define SD_Response6			0x01C
+#define SD_Response7 			0x01E
+#define SD_BufferDataPort0		0x020
+#define SD_BufferDataPort1 		0x022
+#define SD_PresentState			0x024
+#define SD_HostCntrl			0x028
+#define SD_PwrCntrl			0x029
+#define SD_BlockGapCntrl 		0x02A
+#define SD_WakeupCntrl 			0x02B
+#define SD_ClockCntrl			0x02C
+#define SD_TimeoutCntrl 		0x02E
+#define SD_SoftwareReset		0x02F
+#define SD_IntrStatus			0x030
+#define SD_ErrorIntrStatus 		0x032
+#define SD_IntrStatusEnable		0x034
+#define SD_ErrorIntrStatusEnable 	0x036
+#define SD_IntrSignalEnable		0x038
+#define SD_ErrorIntrSignalEnable 	0x03A
+#define SD_CMD12ErrorStatus		0x03C
+#define SD_Capabilities			0x040
+#define SD_Capabilities3		0x044
+#define SD_MaxCurCap			0x048
+#define SD_MaxCurCap_Reserved		0x04C
+#define SD_ADMA_ErrStatus		0x054
+#define SD_ADMA_SysAddr			0x58
+#define SD_SlotInterruptStatus		0x0FC
+#define SD_HostControllerVersion 	0x0FE
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo	0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2			0x03E
+/* preset registers: start offset and count */
+#define SD3_PresetValStart		0x060
+#define SD3_PresetValCount		8
+/* preset-indiv regs */
+#define SD3_PresetVal_init		0x060
+#define SD3_PresetVal_default	0x062
+#define SD3_PresetVal_HS		0x064
+#define SD3_PresetVal_SDR12		0x066
+#define SD3_PresetVal_SDR25		0x068
+#define SD3_PresetVal_SDR50		0x06a
+#define SD3_PresetVal_SDR104	0x06c
+#define SD3_PresetVal_DDR50		0x06e
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX	0
+#define SD3_PRESETVAL_DESPEED_IX	1
+#define SD3_PRESETVAL_HISPEED_IX	2
+#define SD3_PRESETVAL_SDR12_IX		3
+#define SD3_PRESETVAL_SDR25_IX		4
+#define SD3_PRESETVAL_SDR50_IX		5
+#define SD3_PRESETVAL_SDR104_IX		6
+#define SD3_PRESETVAL_DDR50_IX		7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M 	BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 	0
+#define CAP_TO_CLKUNIT_M  	BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 	7
+/* Note: for SDIO 2.0 this field is only 6 bits wide (the top 2 bits are
+	reserved); the full 8-bit mask is used here because SDIO 3.0 requires it.
+*/
+#define CAP_BASECLK_M 		BITFIELD_MASK(8)
+#define CAP_BASECLK_S 		8
+#define CAP_MAXBLOCK_M 		BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S		16
+#define CAP_ADMA2_M		BITFIELD_MASK(1)
+#define CAP_ADMA2_S		19
+#define CAP_ADMA1_M		BITFIELD_MASK(1)
+#define CAP_ADMA1_S		20
+#define CAP_HIGHSPEED_M		BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S		21
+#define CAP_DMA_M		BITFIELD_MASK(1)
+#define CAP_DMA_S		22
+#define CAP_SUSPEND_M		BITFIELD_MASK(1)
+#define CAP_SUSPEND_S		23
+#define CAP_VOLT_3_3_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S		24
+#define CAP_VOLT_3_0_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S		25
+#define CAP_VOLT_1_8_M		BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S		26
+#define CAP_64BIT_HOST_M	BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S	28
+
+#define SDIO_OCR_READ_FAIL	(2)
+
+
+#define CAP_ASYNCINT_SUP_M	BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S	29
+
+#define CAP_SLOTTYPE_M		BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S		30
+
+#define CAP3_MSBits_OFFSET	(32)
+/* Note: the following capabilities sit in the upper 32 bits of the
+	capabilities register, so their spec bit positions (32 and up) have
+	CAP3_MSBits_OFFSET subtracted to give shifts within that upper word.
+*/
+#define CAP3_SDR50_SUP_M		BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M	BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S	(33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M	BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S	(34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M		BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S	(36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S	(37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S	(38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M	BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S	(40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M	BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S	(45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M	BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S	(46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M		BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S		(48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M	BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S	14
+
+#define PRESET_CLK_DIV_M	BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S	0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S		0
+#define CAP_CURR_3_0_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S		8
+#define CAP_CURR_1_8_M		BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S		16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M		BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S		0
+#define BLKSZ_BNDRY_M		BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S		12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes  */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M   	BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S	0
+#define XFER_BLK_COUNT_EN_M 	BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S	1
+#define XFER_CMD_12_EN_M    	BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 	2
+#define XFER_DATA_DIRECTION_M	BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S	4
+#define XFER_MULTI_BLOCK_M	BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S	5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 		0
+#define RESP_TYPE_136  		1
+#define RESP_TYPE_48   		2
+#define RESP_TYPE_48_BUSY	3
+/* type field */
+#define CMD_TYPE_NORMAL		0
+#define CMD_TYPE_SUSPEND	1
+#define CMD_TYPE_RESUME		2
+#define CMD_TYPE_ABORT		3
+
+#define CMD_RESP_TYPE_M		BITFIELD_MASK(2)	/* Bits [0-1] 	- Response type */
+#define CMD_RESP_TYPE_S		0
+#define CMD_CRC_EN_M		BITFIELD_MASK(1)	/* Bit 3 	- CRC enable */
+#define CMD_CRC_EN_S		3
+#define CMD_INDEX_EN_M		BITFIELD_MASK(1)	/* Bit 4 	- Enable index checking */
+#define CMD_INDEX_EN_S		4
+#define CMD_DATA_EN_M		BITFIELD_MASK(1)	/* Bit 5 	- Using DAT line */
+#define CMD_DATA_EN_S		5
+#define CMD_TYPE_M		BITFIELD_MASK(2)	/* Bit [6-7] 	- Normal, abort, resume, etc
+							 */
+#define CMD_TYPE_S		6
+#define CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [8-13] 	- Command number */
+#define CMD_INDEX_S		8
+
+/* SD_BufferDataPort0	: Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 	: Offset 0x022, size = 2 bytes */
+/* SD_PresentState	: Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 0	May use CMD */
+#define PRES_CMD_INHIBIT_S	0
+#define PRES_DAT_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 1	May use DAT */
+#define PRES_DAT_INHIBIT_S	1
+#define PRES_DAT_BUSY_M		BITFIELD_MASK(1)	/* Bit 2	DAT is busy */
+#define PRES_DAT_BUSY_S		2
+#define PRES_PRESENT_RSVD_M	BITFIELD_MASK(5)	/* Bit [3-7]	rsvd */
+#define PRES_PRESENT_RSVD_S	3
+#define PRES_WRITE_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 8	Write is active */
+#define PRES_WRITE_ACTIVE_S	8
+#define PRES_READ_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 9	Read is active */
+#define PRES_READ_ACTIVE_S	9
+#define PRES_WRITE_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 10	Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S	10
+#define PRES_READ_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 11	Read buf data avail */
+#define PRES_READ_DATA_RDY_S	11
+#define PRES_CARD_PRESENT_M	BITFIELD_MASK(1)	/* Bit 16	Card present - debounced */
+#define PRES_CARD_PRESENT_S	16
+#define PRES_CARD_STABLE_M	BITFIELD_MASK(1)	/* Bit 17	Debugging */
+#define PRES_CARD_STABLE_S	17
+#define PRES_CARD_PRESENT_RAW_M	BITFIELD_MASK(1)	/* Bit 18	Not debounced */
+#define PRES_CARD_PRESENT_RAW_S	18
+#define PRES_WRITE_ENABLED_M	BITFIELD_MASK(1)	/* Bit 19	Write protected? */
+#define PRES_WRITE_ENABLED_S	19
+#define PRES_DAT_SIGNAL_M	BITFIELD_MASK(4)	/* Bit [20-23]	Debugging */
+#define PRES_DAT_SIGNAL_S	20
+#define PRES_CMD_SIGNAL_M	BITFIELD_MASK(1)	/* Bit 24	Debugging */
+#define PRES_CMD_SIGNAL_S	24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M		BITFIELD_MASK(1)	/* Bit 0	LED On/Off */
+#define HOST_LED_S		0
+#define HOST_DATA_WIDTH_M	BITFIELD_MASK(1)	/* Bit 1	4 bit enable */
+#define HOST_DATA_WIDTH_S	1
+#define HOST_HI_SPEED_EN_M	BITFIELD_MASK(1)	/* Bit 2	High speed vs low speed */
+#define HOST_HI_SPEED_EN_S	2
+#define HOST_DMA_SEL_M		BITFIELD_MASK(2)	/* Bit 4:3	DMA Select */
+#define HOST_DMA_SEL_S		3
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S	15					/* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S	14					/* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S	7					/* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S	6					/* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M	BITFIELD_MASK(2)	/* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S	4					/* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S	3					/* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M	BITFIELD_MASK(3)	/* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S	0					/* bit# */
+
+#define HOST_CONTR_VER_2		(1)
+#define HOST_CONTR_VER_3		(2)
+
+/* misc defines */
+#define SD1_MODE 		0x1	/* SD Host Cntrlr Spec */
+#define SD4_MODE 		0x2	/* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M		BITFIELD_MASK(1)	/* Bit 0	Power the bus */
+#define PWR_BUS_EN_S		0
+#define PWR_VOLTS_M		BITFIELD_MASK(3)	/* Bit [1-3]	Voltage Select */
+#define PWR_VOLTS_S		1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M		BITFIELD_MASK(1)	/* Bit 0	Reset All */
+#define SW_RESET_ALL_S		0
+#define SW_RESET_CMD_M		BITFIELD_MASK(1)	/* Bit 1	CMD Line Reset */
+#define SW_RESET_CMD_S		1
+#define SW_RESET_DAT_M		BITFIELD_MASK(1)	/* Bit 2	DAT Line Reset */
+#define SW_RESET_DAT_S		2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M		BITFIELD_MASK(1)	/* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S		0
+#define INTSTAT_XFER_COMPLETE_M		BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S		1
+#define INTSTAT_BLOCK_GAP_EVENT_M	BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S	2
+#define INTSTAT_DMA_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S		3
+#define INTSTAT_BUF_WRITE_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S	4
+#define INTSTAT_BUF_READ_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S	5
+#define INTSTAT_CARD_INSERTION_M	BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S	6
+#define INTSTAT_CARD_REMOVAL_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S		7
+#define INTSTAT_CARD_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S		8
+#define INTSTAT_RETUNING_INT_M		BITFIELD_MASK(1)	/* Bit 12 */
+#define INTSTAT_RETUNING_INT_S		12
+#define INTSTAT_ERROR_INT_M		BITFIELD_MASK(1)	/* Bit 15 */
+#define INTSTAT_ERROR_INT_S		15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S		0
+#define ERRINT_CMD_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S		1
+#define ERRINT_CMD_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S		2
+#define ERRINT_CMD_INDEX_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S		3
+#define ERRINT_DATA_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S		4
+#define ERRINT_DATA_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S		5
+#define ERRINT_DATA_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S		6
+#define ERRINT_CURRENT_LIMIT_M		BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S		7
+#define ERRINT_AUTO_CMD12_M		BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S		8
+#define ERRINT_VENDOR_M			BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S			12
+#define ERRINT_ADMA_M			BITFIELD_MASK(1)
+#define ERRINT_ADMA_S			9
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT		0x0001
+#define ERRINT_CMD_CRC_BIT		0x0002
+#define ERRINT_CMD_ENDBIT_BIT		0x0004
+#define ERRINT_CMD_INDEX_BIT		0x0008
+#define ERRINT_DATA_TIMEOUT_BIT		0x0010
+#define ERRINT_DATA_CRC_BIT		0x0020
+#define ERRINT_DATA_ENDBIT_BIT		0x0040
+#define ERRINT_CURRENT_LIMIT_BIT	0x0080
+#define ERRINT_AUTO_CMD12_BIT		0x0100
+#define ERRINT_ADMA_BIT		0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS		(ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+				 ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS	(ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+				 ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS	(ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
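+
+/*
+ * Illustrative sketch, not part of the original header: classifying a raw
+ * SD_ErrorIntrStatus value with the combined masks above.  Plain unsigned
+ * int is used so the helpers do not depend on typedefs.h.
+ */
+static inline int
+sd_errint_is_cmd_err(unsigned int errintstatus)
+{
+	return (errintstatus & ERRINT_CMD_ERRS) != 0;
+}
+
+static inline int
+sd_errint_is_data_err(unsigned int errintstatus)
+{
+	return (errintstatus & ERRINT_DATA_ERRS) != 0;
+}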
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl	: Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl 	: Offset 0x02E , size = bytes */
+/* SD_IntrStatus	: Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus 	: Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable	: Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable	: Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus	: Offset 0x03C , size = bytes */
+/* SD_Capabilities	: Offset 0x040 , size = bytes */
+/* SD_MaxCurCap		: Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 0000000..2c5bcf9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h,v 13.9 2009-12-08 22:30:15 Exp $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+	int func;
+	int offset;
+	int value;
+} sdreg_t;
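+
+/* usage note (not in the original header): utilities such as dhdu/wlu fill an
+ * sdreg_t (func, offset, value) and hand it to the driver through its iovar
+ * interface to read or write SDIO registers; the iovar names and ioctl
+ * plumbing are defined by the driver, not by this header.
+ */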
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL		0x0001	/* Error */
+#define SDH_TRACE_VAL		0x0002	/* Trace */
+#define SDH_INFO_VAL		0x0004	/* Info */
+#define SDH_DEBUG_VAL		0x0008	/* Debug */
+#define SDH_DATA_VAL		0x0010	/* Data */
+#define SDH_CTRL_VAL		0x0020	/* Control Regs */
+#define SDH_LOG_VAL		0x0040	/* Enable bcmlog */
+#define SDH_DMA_VAL		0x0080	/* DMA */
+
+#define NUM_PREV_TRANSACTIONS	16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 0000000..c5a3383
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,247 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h,v 13.251.2.10 2011-02-04 05:06:32 Exp $
+ */
+
+
+#ifndef	_siutils_h_
+#define	_siutils_h_
+
+
+struct si_pub {
+	uint	socitype;		
+
+	uint	bustype;		
+	uint	buscoretype;		
+	uint	buscorerev;		
+	uint	buscoreidx;		
+	int	ccrev;			
+	uint32	cccaps;			
+	uint32  cccaps_ext;			
+	int	pmurev;			
+	uint32	pmucaps;		
+	uint	boardtype;		
+	uint	boardvendor;		
+	uint	boardflags;		
+	uint	boardflags2;		
+	uint	chip;			
+	uint	chiprev;		
+	uint	chippkg;		
+	uint32	chipst;			
+	bool	issim;			
+	uint    socirev;		
+	bool	pci_pr32414;
+
+};
+
+
+typedef const struct si_pub si_t;
+
+
+#define	SI_OSH		NULL	
+
+#define	BADIDX		(SI_MAXCORES + 1)
+
+
+#define	XTAL			0x1	
+#define	PLL			0x2	
+
+
+#define	CLK_FAST		0	
+#define	CLK_DYNAMIC		2	
+
+
+#define GPIO_DRV_PRIORITY	0	
+#define GPIO_APP_PRIORITY	1	
+#define GPIO_HI_PRIORITY	2	
+
+
+#define GPIO_PULLUP		0
+#define GPIO_PULLDN		1
+
+
+#define GPIO_REGEVT		0	
+#define GPIO_REGEVT_INTMSK	1	
+#define GPIO_REGEVT_INTPOL	2	
+
+
+#define SI_DEVPATH_BUFSZ	16	
+
+
+#define	SI_DOATTACH	1
+#define SI_PCIDOWN	2
+#define SI_PCIUP	3
+
+#define	ISSIM_ENAB(sih)	0
+
+
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih)	(BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih)	((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih)		(0)
+#define CCPLL_ENAB(sih)		(0)
+#else
+#define CCCTL_ENAB(sih)		((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih)		((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+
+
+
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih);
+extern uint32 si_ilp_clock(si_t *sih);
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+	void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect);
+extern bool si_socdevram_pkg(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+
+
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+
+extern bool si_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+extern void si_sdio_init(si_t *sih);
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci(sih) 0
+#define si_eci_init(sih) (0)
+#define si_eci_notify_bt(sih, type, val)  (0)
+
+
+
+extern int si_devpath(si_t *sih, char *path, int size);
+
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern uint si_pll_reset(si_t *sih);
+
+
+
+extern bool si_taclear(si_t *sih, bool details);
+
+
+
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+
+char *si_getnvramflvar(si_t *sih, const char *name);
+
+#endif	
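
[Editor's note] Most of the si_* accessors declared above operate on whichever backplane core
is currently selected, so callers typically save the current core index, switch cores, perform
the access, and restore. The fragment below is a minimal sketch of that pattern, not taken from
this driver; it assumes siutils.h and its dependencies are included, and that passing mask == 0
and val == 0 to si_corereg() performs a read-only access, which is the usual Broadcom convention.

    /* Select "coreid" (unit 0), read one of its registers, restore the old core. */
    static uint read_core_reg(si_t *sih, uint coreid, uint regoff)
    {
    	uint origidx = si_coreidx(sih);		/* remember the selected core */
    	uint val = 0;

    	if (si_setcore(sih, coreid, 0) != NULL)	/* switch to unit 0 of coreid */
    		val = si_corereg(sih, si_coreidx(sih), regoff, 0, 0);

    	si_setcoreidx(sih, origidx);		/* restore the original core */
    	return val;
    }
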
diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h
new file mode 100644
index 0000000..397006a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -0,0 +1,52 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h,v 13.15.108.2 2010-11-15 17:57:30 Exp $
+ */
+
+#ifndef	_TRX_HDR_H_
+#define	_TRX_HDR_H_
+
+#include <typedefs.h>
+
+#define TRX_MAGIC	0x30524448	/* "HDR0" */
+#define TRX_VERSION	1		/* Version 1 */
+#define TRX_MAX_LEN	0x3B0000	/* Max length */
+#define TRX_NO_HEADER	1		/* Do not write TRX header */
+#define TRX_GZ_FILES	0x2     /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_OVERLAYS	0x4     /* Contains an overlay header after the trx header */
+#define TRX_MAX_OFFSET	3		/* Max number of individual files */
+#define TRX_UNCOMP_IMAGE	0x20	/* Trx contains uncompressed rtecdc.bin image */
+
+struct trx_header {
+	uint32 magic;		/* "HDR0" */
+	uint32 len;		/* Length of file including header */
+	uint32 crc32;		/* 32-bit CRC from flag_version to end of file */
+	uint32 flag_version;	/* 0:15 flags, 16:31 version */
+	uint32 offsets[TRX_MAX_OFFSET];	/* Offsets of partitions from start of header */
+};
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
+
+#endif /* _TRX_HDR_H_ */
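
[Editor's note] struct trx_header above fully determines how a TRX image begins, so a loader can
sanity-check an image before touching the payload. The following is an illustrative sketch, not
part of the driver: it assumes trxhdr.h is included and that the buffer holds a little-endian TRX
image (on a big-endian host the fields would need byte swapping); CRC verification over
flag_version..end of file is deliberately omitted.

    #include <string.h>			/* memcpy */

    /* Returns the image version (bits 16:31 of flag_version) or -1 on error. */
    static int trx_check(const void *buf, uint32 buflen)
    {
    	struct trx_header hdr;

    	if (buflen < sizeof(hdr))
    		return -1;
    	memcpy(&hdr, buf, sizeof(hdr));		/* the buffer may be unaligned */

    	if (hdr.magic != TRX_MAGIC)		/* "HDR0" */
    		return -1;
    	if (hdr.len < sizeof(hdr) || hdr.len > buflen)
    		return -1;			/* len covers header + payload */

    	return (int)(hdr.flag_version >> 16);	/* 0:15 flags, 16:31 version */
    }
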
diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h
new file mode 100644
index 0000000..228b5dc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h,v 1.103.2.1 2010-05-11 18:19:28 Exp $
+ */
+
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+
+
+#include "site_typedefs.h"
+
+#else
+
+
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE	false
+#endif
+#ifndef TRUE
+#define TRUE	true
+#endif
+
+#else	
+
+
+#endif	
+
+#if defined(__x86_64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif 
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif	
+
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif	
+#endif	
+#endif  
+
+
+
+
+
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif
+
+
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif 
+
+#if !defined(__DJGPP__)
+
+
+#if defined(__KERNEL__)
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#include <linux/types.h>	
+#endif 
+
+#else
+
+
+#include <sys/types.h>
+
+#endif 
+
+#endif 
+
+
+
+
+#define USE_TYPEDEF_DEFAULTS
+
+#endif 
+
+
+
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef	 unsigned char	bool;
+#endif
+
+
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char	uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short	ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int	uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long	ulong;
+#endif
+
+
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char	uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short	uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int	uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int	uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char	int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short	int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int	int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT32
+typedef float		float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double		float64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else 
+typedef float64 float_t;
+#endif
+
+#endif 
+
+
+
+#ifndef FALSE
+#define FALSE	0
+#endif
+
+#ifndef TRUE
+#define TRUE	1  
+#endif
+
+#ifndef NULL
+#define	NULL	0
+#endif
+
+#ifndef OFF
+#define	OFF	0
+#endif
+
+#ifndef ON
+#define	ON	1  
+#endif
+
+#define	AUTO	(-1) 
+
+
+
+#ifndef PTRSZ
+#define	PTRSZ	sizeof(char*)
+#endif
+
+
+
+#if defined(__GNUC__)
+	#define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+	#define BWL_COMPILER_ARMCC
+#else
+	#error "Unknown compiler!"
+#endif 
+
+
+#ifndef INLINE
+	#if defined(BWL_COMPILER_MICROSOFT)
+		#define INLINE __inline
+	#elif defined(BWL_COMPILER_GNU)
+		#define INLINE __inline__
+	#elif defined(BWL_COMPILER_ARMCC)
+		#define INLINE	__inline
+	#else
+		#define INLINE
+	#endif 
+#endif 
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif 
+
+
+#define UNUSED_PARAMETER(x) (void)(x)
+
+
+#include <bcmdefs.h>
+#endif 
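
[Editor's note] The fixed-width typedefs above silently assume particular sizes for the
compiler's basic types (for example, that int is 32 bits). A common way to catch a mismatch at
build time is a negative-array-size assertion; the BWL_SIZE_ASSERT macro below is a hypothetical
illustration and not part of this header.

    /* A negative array size is rejected by the compiler, so any size mismatch
     * fails the build instead of corrupting structures at run time.
     */
    #define BWL_SIZE_ASSERT(expr, name) \
    	typedef char size_assert_##name[(expr) ? 1 : -1]

    BWL_SIZE_ASSERT(sizeof(uint8)  == 1, uint8_is_1_byte);
    BWL_SIZE_ASSERT(sizeof(uint16) == 2, uint16_is_2_bytes);
    BWL_SIZE_ASSERT(sizeof(uint32) == 4, uint32_is_4_bytes);
    BWL_SIZE_ASSERT(sizeof(uint64) == 8, uint64_is_8_bytes);
    BWL_SIZE_ASSERT(sizeof(uintptr) >= sizeof(void *), uintptr_holds_a_pointer);
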
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 0000000..7230d3b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,198 @@
+/*
+* Copyright (C) 1999-2011, Broadcom Corporation
+* 
+*         Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: wlfc_proto.h,v 1.1.6.2 2010-05-08 01:30:41 Exp $
+*
+*/
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+	/* Use TLV to convey WLFC information.
+	 ---------------------------------------------------------------------------
+	| Type |  Len | value                    | Description
+	 ---------------------------------------------------------------------------
+	|  1   |   1  | (handle)                 | MAC OPEN
+	 ---------------------------------------------------------------------------
+	|  2   |   1  | (handle)                 | MAC CLOSE
+	 ---------------------------------------------------------------------------
+	|  3   |   2  | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+	 ---------------------------------------------------------------------------
+	|  4   |   4  | see pkttag comments      | TXSTATUS
+	 ---------------------------------------------------------------------------
+	|  5   |   4  | see pkttag comments      | PKTTAG [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  6   |   8  | (handle, ifid, MAC)      | MAC ADD
+	 ---------------------------------------------------------------------------
+	|  7   |   8  | (handle, ifid, MAC)      | MAC DEL
+	 ---------------------------------------------------------------------------
+	|  8   |   1  | (rssi)                   | RSSI - RSSI value for the packet.
+	 ---------------------------------------------------------------------------
+	|  9   |   1  | (interface ID)           | Interface OPEN
+	 ---------------------------------------------------------------------------
+	|  10  |   1  | (interface ID)           | Interface CLOSE
+	 ---------------------------------------------------------------------------
+	|  11  |   8  | fifo credit returns map  | FIFO credits back to the host
+	|      |      |                          |
+	|      |      |                          | --------------------------------------
+	|      |      |                          | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+	|      |      |                          | --------------------------------------
+	|      |      |                          |
+	 ---------------------------------------------------------------------------
+	|  12  |   2  | MAC handle,              | Host provides a bitmap of pending
+	|      |      | AC[0-3] traffic bitmap   | unicast traffic for MAC-handle dstn.
+	|      |      |                          | [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  13  |   3  | (count, handle, prec_bmp)| One time request for packet to a specific
+	|      |      |                          | MAC destination.
+	 ---------------------------------------------------------------------------
+	| 255  |  N/A |  N/A                     | FILLER - This is a special type
+	|      |      |                          | that has no length or value.
+	|      |      |                          | Typically used for padding.
+	 ---------------------------------------------------------------------------
+	*/
+
+#define WLFC_CTL_TYPE_MAC_OPEN				1
+#define WLFC_CTL_TYPE_MAC_CLOSE				2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT	3
+#define WLFC_CTL_TYPE_TXSTATUS				4
+#define WLFC_CTL_TYPE_PKTTAG				5
+
+#define WLFC_CTL_TYPE_MACDESC_ADD			6
+#define WLFC_CTL_TYPE_MACDESC_DEL			7
+#define WLFC_CTL_TYPE_RSSI					8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN		9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE		10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK		11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP	12
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET	13
+
+#define WLFC_CTL_TYPE_FILLER				255
+
+#define WLFC_CTL_VALUE_LEN_MACDESC			8 /* handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC		1	/* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI		1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE	1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP	2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS	4
+#define WLFC_CTL_VALUE_LEN_PKTTAG	4
+
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK	6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
+
+
+
+#define WLFC_PKTID_GEN_MASK		0x80000000
+#define WLFC_PKTID_GEN_SHIFT	31
+
+#define WLFC_PKTID_GEN(x)	(((x) & WLFC_PKTID_GEN_MASK) >> WLFC_PKTID_GEN_SHIFT)
+#define WLFC_PKTID_SETGEN(x, gen)	(x) = ((x) & ~WLFC_PKTID_GEN_MASK) | \
+	(((gen) << WLFC_PKTID_GEN_SHIFT) & WLFC_PKTID_GEN_MASK)
+
+#define WLFC_PKTFLAG_PKTFROMHOST	0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED	0x02
+
+#define WL_TXSTATUS_FLAGS_MASK			0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT			27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x)		(((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+	WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK			0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT			24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x)		(((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK			0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num)	((x) = \
+	((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x)		((x) & WL_TXSTATUS_PKTID_MASK)
+
+/* 32 STA should be enough??, 6 bits; Must be power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE	32
+#define WLFC_MAX_IFNUM				16
+#define WLFC_MAC_DESC_ID_INVALID	0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x)	(x) |= \
+	(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x)	(x) &= \
+	~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WL_TXSTATUS_GENERATION_MASK			1
+#define WL_TXSTATUS_GENERATION_SHIFT		31
+
+#define WLFC_PKTFLAG_SET_GENERATION(x, gen)	((x) = \
+	((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+	(((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WLFC_PKTFLAG_GENERATION(x)	(((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+	WL_TXSTATUS_GENERATION_MASK)
+
+#define WLFC_MAX_PENDING_DATALEN	120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD		0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS	1
+/* WL firmware suppressed a packet because the MAC is already in PS mode (short time window) */
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS		2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC	3
+
+#define WLFC_D11_STATUS_INTERPRET(txs)	(((((txs)->status & TX_STATUS_SUPR_MASK) >> \
+		TX_STATUS_SUPR_SHIFT)) ? WLFC_CTL_PKTFLAG_D11SUPPRESS : WLFC_CTL_PKTFLAG_DISCARD)
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+	{printf("WLFC: %s():%d:caller:%p\n", \
+	__FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+	banner, ea[0], 	ea[1], 	ea[2], 	ea[3], 	ea[4], 	ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+#endif /* __wlfc_proto_definitions_h__ */
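
[Editor's note] The WL_TXSTATUS_* accessors above pack a 4-bit flags field, a 3-bit FIFO id and a
24-bit packet id into one 32-bit status word. The fragment below is a small round trip through
those macros; the values are arbitrary examples, wlfc_txstatus_demo() is a hypothetical name, and
wlfc_proto.h is assumed to be included.

    static void wlfc_txstatus_demo(void)
    {
    	uint32 word = 0;

    	WL_TXSTATUS_SET_FLAGS(word, WLFC_PKTFLAG_PKTFROMHOST);	/* bits 27..30 */
    	WL_TXSTATUS_SET_FIFO(word, 2);				/* bits 24..26: FIFO id */
    	WL_TXSTATUS_SET_PKTID(word, 0x1234);			/* bits 0..23 */

    	/* WL_TXSTATUS_GET_FLAGS(word) == WLFC_PKTFLAG_PKTFROMHOST
    	 * WL_TXSTATUS_GET_FIFO(word)  == 2
    	 * WL_TXSTATUS_GET_PKTID(word) == 0x1234
    	 */
    	(void)word;
    }
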
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
new file mode 100644
index 0000000..2f5e9e1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -0,0 +1,2687 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h,v 1.767.2.38 2011-02-01 23:04:28 Exp $
+ */
+
+
+#ifndef _wlioctl_h_
+#define _wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <bcmwifi.h>
+
+#include <bcmcdc.h>
+
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ   16
+#endif
+
+
+typedef struct remote_ioctl {
+	cdc_ioctl_t     msg;
+	uint        data_len;
+	char            intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+
+#define ACTION_FRAME_SIZE 1040
+
+typedef struct wl_action_frame {
+	struct ether_addr   da;
+	uint16          len;
+	uint32          packetId;
+	uint8           data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+	uint8		ssid_len;
+	uint8		ssid[32];
+} ssid_info_t;
+
+typedef struct wl_af_params {
+	uint32          channel;
+	int32           dwell_time;
+	struct ether_addr   BSSID;
+	wl_action_frame_t   action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+
+
+
+#define LEGACY2_WL_BSS_INFO_VERSION 108     
+
+
+typedef struct wl_bss_info_108 {
+	uint32      version;        
+	uint32      length;         
+	struct ether_addr BSSID;
+	uint16      beacon_period;      
+	uint16      capability;     
+	uint8       SSID_len;
+	uint8       SSID[32];
+	struct {
+		uint    count;          
+		uint8   rates[16];      
+	} rateset;              
+	chanspec_t  chanspec;       
+	uint16      atim_window;        
+	uint8       dtim_period;        
+	int16       RSSI;           
+	int8        phy_noise;      
+
+	uint8       n_cap;          
+	uint32      nbss_cap;       
+	uint8       ctl_ch;         
+	uint32      reserved32[1];      
+	uint8       flags;          
+	uint8       reserved[3];        
+	uint8       basic_mcs[MCSSET_LEN];  
+
+	uint16      ie_offset;      
+	uint32      ie_length;      
+	
+	
+} wl_bss_info_108_t;
+
+#define WL_BSS_INFO_VERSION 109     
+
+
+typedef struct wl_bss_info {
+	uint32      version;        /* version field */
+	uint32      length;         /* byte length of data in this record, starting at version and including IEs */
+	struct ether_addr BSSID;
+	uint16      beacon_period;      /* units are Kusec */
+	uint16      capability;     /* Capability information */
+	uint8       SSID_len;
+	uint8       SSID[32];
+	struct {
+		uint    count;          /* # rates in this set */
+		uint8   rates[16];      /* rates in 500kbps units w/hi bit set if basic */
+	} rateset;              /* supported rates */
+	chanspec_t  chanspec;       /* chanspec for bss */
+	uint16      atim_window;        /* units are Kusec */
+	uint8       dtim_period;        /* DTIM period */
+	int16       RSSI;           /* receive signal strength (in dBm) */
+	int8        phy_noise;      /* noise (in dBm) */
+
+	uint8       n_cap;          /* BSS is 802.11N capable */
+	uint32      nbss_cap;       /* 802.11N BSS capabilities (based on HT_CAP_*) */
+	uint8       ctl_ch;         /* 802.11N BSS control channel number */
+	uint32      reserved32[1];      /* reserved for expansion of BSS properties */
+	uint8       flags;          /* flags */
+	uint8       reserved[3];        /* reserved for expansion of BSS properties */
+	uint8       basic_mcs[MCSSET_LEN];  /* 802.11N BSS required MCS set */
+
+	uint16      ie_offset;      /* offset at which the IEs start, from beginning */
+	uint32      ie_length;      /* byte length of Information Elements */
+	int16       SNR;            /* average SNR during frame reception */
+
+} wl_bss_info_t;
+
+typedef struct wl_bsscfg {
+	uint32  wsec;
+	uint32  WPA_auth;
+	uint32  wsec_index;
+	uint32  associated;
+	uint32  BSS;
+	uint32  phytest_on;
+	struct ether_addr   prev_BSSID;
+	struct ether_addr   BSSID;
+} wl_bsscfg_t;
+
+typedef struct wl_bss_config {
+	uint32  atim_window;
+	uint32  beacon_period;
+	uint32  chanspec;
+} wl_bss_config_t;
+
+
+typedef struct wlc_ssid {
+	uint32      SSID_len;
+	uchar       SSID[32];
+} wlc_ssid_t;
+
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY   2
+
+
+#define WL_SCANFLAGS_PASSIVE 0x01       
+#define WL_SCANFLAGS_RESERVED 0x02      
+#define WL_SCANFLAGS_PROHIBITED 0x04    
+
+typedef struct wl_scan_params {
+	wlc_ssid_t ssid;        
+	struct ether_addr bssid;    
+	int8 bss_type;          
+	uint8 scan_type;        
+	int32 nprobes;          
+	int32 active_time;      
+	int32 passive_time;     
+	int32 home_time;        
+	int32 channel_num;      
+	uint16 channel_list[1];     
+} wl_scan_params_t;
+
+
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+
+
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START      1
+#define WL_SCAN_ACTION_CONTINUE   2
+#define WL_SCAN_ACTION_ABORT      3
+
+#define ISCAN_REQ_VERSION 1
+
+
+typedef struct wl_iscan_params {
+	uint32 version;
+	uint16 action;
+	uint16 scan_duration;
+	wl_scan_params_t params;
+} wl_iscan_params_t;
+
+
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
+
+
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
+
+
+#define WL_SCAN_RESULTS_SUCCESS 0
+#define WL_SCAN_RESULTS_PARTIAL 1
+#define WL_SCAN_RESULTS_PENDING 2
+#define WL_SCAN_RESULTS_ABORTED 3
+#define WL_SCAN_RESULTS_NO_MEM  4
+
+
+#define DNGL_RXCTXT_SIZE	45
+
+#if defined(SIMPLE_ISCAN)
+#define ISCAN_RETRY_CNT   5
+#define ISCAN_STATE_IDLE   0
+#define ISCAN_STATE_SCANING 1
+#define ISCAN_STATE_PENDING 2
+
+
+#define WLC_IW_ISCAN_MAXLEN   2048
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+#endif 
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+	uint32 version;
+	uint16 action;
+	uint16 sync_id;
+	wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+	uint32 buflen;
+	uint32 version;
+	uint16 sync_id;
+	uint16 bss_count;
+	wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+
+typedef struct wl_iscan_results {
+	uint32 status;
+	wl_scan_results_t results;
+} wl_iscan_results_t;
+
+
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+	(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+typedef struct wl_probe_params {
+	wlc_ssid_t ssid;
+	struct ether_addr bssid;
+	struct ether_addr mac;
+} wl_probe_params_t;
+
+#define WL_NUMRATES     16  
+typedef struct wl_rateset {
+	uint32  count;          
+	uint8   rates[WL_NUMRATES]; 
+} wl_rateset_t;
+
+typedef struct wl_rateset_args {
+	uint32  count;          
+	uint8   rates[WL_NUMRATES]; 
+	uint8   mcs[MCSSET_LEN];        
+} wl_rateset_args_t;
+
+
+typedef struct wl_uint32_list {
+	
+	uint32 count;
+	
+	uint32 element[1];
+} wl_uint32_list_t;
+
+
+typedef struct wl_assoc_params {
+	struct ether_addr bssid;    
+	uint16 bssid_cnt;       
+	int32 chanspec_num;     
+	chanspec_t chanspec_list[1];    
+} wl_assoc_params_t;
+#define WL_ASSOC_PARAMS_FIXED_SIZE  (sizeof(wl_assoc_params_t) - sizeof(chanspec_t))
+
+
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE    WL_ASSOC_PARAMS_FIXED_SIZE
+
+
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE        WL_ASSOC_PARAMS_FIXED_SIZE
+
+
+typedef struct wl_join_params {
+	wlc_ssid_t ssid;
+	wl_assoc_params_t params;   
+} wl_join_params_t;
+#define WL_JOIN_PARAMS_FIXED_SIZE   (sizeof(wl_join_params_t) - sizeof(chanspec_t))
+
+
+typedef struct wl_join_scan_params {
+	uint8 scan_type;                
+	int32 nprobes;                  
+	int32 active_time;              
+	int32 passive_time;             
+	int32 home_time;                
+} wl_join_scan_params_t;
+
+
+typedef struct wl_extjoin_params {
+	wlc_ssid_t ssid;                
+	wl_join_scan_params_t scan;
+	wl_join_assoc_params_t assoc;   
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE    (sizeof(wl_extjoin_params_t) - sizeof(chanspec_t))
+
+typedef struct {
+	uint32 num;
+	chanspec_t list[1];
+} chanspec_list_t;
+
+
+#define NRATE_MCS_INUSE 0x00000080  
+#define NRATE_RATE_MASK 0x0000007f  
+#define NRATE_STF_MASK  0x0000ff00  
+#define NRATE_STF_SHIFT 8       
+#define NRATE_OVERRIDE  0x80000000  
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 
+#define NRATE_SGI_MASK  0x00800000      
+#define NRATE_SGI_SHIFT 23              
+#define NRATE_LDPC_CODING 0x00400000    
+#define NRATE_LDPC_SHIFT 22             
+
+#define NRATE_STF_SISO  0       
+#define NRATE_STF_CDD   1       
+#define NRATE_STF_STBC  2       
+#define NRATE_STF_SDM   3       
+
+#define ANTENNA_NUM_1   1       
+#define ANTENNA_NUM_2   2
+#define ANTENNA_NUM_3   3
+#define ANTENNA_NUM_4   4
+
+#define ANT_SELCFG_AUTO     0x80    
+#define ANT_SELCFG_MASK     0x33    
+#define ANT_SELCFG_MAX      4   
+#define ANT_SELCFG_TX_UNICAST   0   
+#define ANT_SELCFG_RX_UNICAST   1   
+#define ANT_SELCFG_TX_DEF   2   
+#define ANT_SELCFG_RX_DEF   3   
+
+#define MAX_STREAMS_SUPPORTED   4   
+
+typedef struct {
+	uint8 ant_config[ANT_SELCFG_MAX];   
+	uint8 num_antcfg;   
+} wlc_antselcfg_t;
+
+#define HIGHEST_SINGLE_STREAM_MCS   7 
+
+#define MAX_CCA_CHANNELS 38 
+#define MAX_CCA_SECS     60 
+
+#define IBSS_MED        15  
+#define IBSS_HI         25  
+#define OBSS_MED        12
+#define OBSS_HI         25
+#define INTERFER_MED    5
+#define INTERFER_HI     10
+
+#define  CCA_FLAG_2G_ONLY       0x01    
+#define  CCA_FLAG_5G_ONLY       0x02    
+#define  CCA_FLAG_IGNORE_DURATION   0x04    
+#define  CCA_FLAGS_PREFER_1_6_11    0x10
+#define  CCA_FLAG_IGNORE_INTERFER   0x20 
+
+#define CCA_ERRNO_BAND      1   
+#define CCA_ERRNO_DURATION  2   
+#define CCA_ERRNO_PREF_CHAN 3   
+#define CCA_ERRNO_INTERFER  4   
+#define CCA_ERRNO_TOO_FEW   5   
+
+typedef struct {
+	uint32 duration;    
+	uint32 congest_ibss;    
+				
+	uint32 congest_obss;    
+	uint32 interference;    
+	uint32 timestamp;   
+} cca_congest_t;
+
+typedef struct {
+	chanspec_t chanspec;    
+	uint8 num_secs;     
+	cca_congest_t  secs[1]; 
+} cca_congest_channel_req_t;
+
+#define WLC_CNTRY_BUF_SZ    4       
+
+typedef struct wl_country {
+	char country_abbrev[WLC_CNTRY_BUF_SZ];  
+	int32 rev;              
+	char ccode[WLC_CNTRY_BUF_SZ];       
+} wl_country_t;
+
+typedef struct wl_channels_in_country {
+	uint32 buflen;
+	uint32 band;
+	char country_abbrev[WLC_CNTRY_BUF_SZ];
+	uint32 count;
+	uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+	uint32 buflen;
+	uint32 band_set;
+	uint32 band;
+	uint32 count;
+	char country_abbrev[1];
+} wl_country_list_t;
+
+#define WL_NUM_RPI_BINS     8
+#define WL_RM_TYPE_BASIC    1
+#define WL_RM_TYPE_CCA      2
+#define WL_RM_TYPE_RPI      3
+
+#define WL_RM_FLAG_PARALLEL (1<<0)
+
+#define WL_RM_FLAG_LATE     (1<<1)
+#define WL_RM_FLAG_INCAPABLE    (1<<2)
+#define WL_RM_FLAG_REFUSED  (1<<3)
+
+typedef struct wl_rm_req_elt {
+	int8    type;
+	int8    flags;
+	chanspec_t  chanspec;
+	uint32  token;      
+	uint32  tsf_h;      
+	uint32  tsf_l;      
+	uint32  dur;        
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+	uint32  token;      
+	uint32  count;      
+	void    *cb;        
+	void    *cb_arg;    
+	wl_rm_req_elt_t req[1]; 
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+	int8    type;
+	int8    flags;
+	chanspec_t  chanspec;
+	uint32  token;      
+	uint32  tsf_h;      
+	uint32  tsf_l;      
+	uint32  dur;        
+	uint32  len;        
+	uint8   data[1];    
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN 24  
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+	uint8   rpi[WL_RPI_REP_BIN_NUM];
+	int8    rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+	uint32  token;      
+	uint32  len;        
+	wl_rm_rep_elt_t rep[1]; 
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN 8
+
+
+typedef enum sup_auth_status {
+	
+	WLC_SUP_DISCONNECTED = 0,
+	WLC_SUP_CONNECTING,
+	WLC_SUP_IDREQUIRED,
+	WLC_SUP_AUTHENTICATING,
+	WLC_SUP_AUTHENTICATED,
+	WLC_SUP_KEYXCHANGE,
+	WLC_SUP_KEYED,
+	WLC_SUP_TIMEOUT,
+	WLC_SUP_LAST_BASIC_STATE,
+
+	
+	
+	WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+	
+	WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+	
+	WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+	WLC_SUP_KEYXCHANGE_PREP_M4, 
+	WLC_SUP_KEYXCHANGE_WAIT_G1, 
+	WLC_SUP_KEYXCHANGE_PREP_G2  
+} sup_auth_status_t;
+
+
+#define CRYPTO_ALGO_OFF         0
+#define CRYPTO_ALGO_WEP1        1
+#define CRYPTO_ALGO_TKIP        2
+#define CRYPTO_ALGO_WEP128      3
+#define CRYPTO_ALGO_AES_CCM     4
+#define CRYPTO_ALGO_AES_OCB_MSDU    5
+#define CRYPTO_ALGO_AES_OCB_MPDU    6
+#define CRYPTO_ALGO_NALG        7
+
+#define WSEC_GEN_MIC_ERROR  0x0001
+#define WSEC_GEN_REPLAY     0x0002
+#define WSEC_GEN_ICV_ERROR  0x0004
+
+#define WL_SOFT_KEY (1 << 0)    
+#define WL_PRIMARY_KEY  (1 << 1)    
+#define WL_KF_RES_4 (1 << 4)    
+#define WL_KF_RES_5 (1 << 5)    
+#define WL_IBSS_PEER_GROUP_KEY  (1 << 6)    
+
+typedef struct wl_wsec_key {
+	uint32      index;      
+	uint32      len;        
+	uint8       data[DOT11_MAX_KEY_SIZE];   
+	uint32      pad_1[18];
+	uint32      algo;       
+	uint32      flags;      
+	uint32      pad_2[2];
+	int     pad_3;
+	int     iv_initialized; 
+	int     pad_4;
+	
+	struct {
+		uint32  hi;     
+		uint16  lo;     
+	} rxiv;
+	uint32      pad_5[2];
+	struct ether_addr ea;       
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN    8
+#define WSEC_MAX_PSK_LEN    64
+
+
+#define WSEC_PASSPHRASE     (1<<0)
+
+
+typedef struct {
+	ushort  key_len;        
+	ushort  flags;          
+	uint8   key[WSEC_MAX_PSK_LEN];  
+} wsec_pmk_t;
+
+
+#define WEP_ENABLED     0x0001
+#define TKIP_ENABLED        0x0002
+#define AES_ENABLED     0x0004
+#define WSEC_SWFLAG     0x0008
+#define SES_OW_ENABLED      0x0040  
+
+
+#define WPA_AUTH_DISABLED   0x0000  
+#define WPA_AUTH_NONE       0x0001  
+#define WPA_AUTH_UNSPECIFIED    0x0002  
+#define WPA_AUTH_PSK        0x0004  
+ 
+#define WPA2_AUTH_UNSPECIFIED   0x0040  
+#define WPA2_AUTH_PSK       0x0080  
+#define BRCM_AUTH_PSK           0x0100  
+#define BRCM_AUTH_DPT       0x0200  
+
+
+#define MAXPMKID        16
+
+typedef struct _pmkid {
+	struct ether_addr   BSSID;
+	uint8           PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+	uint32  npmkid;
+	pmkid_t pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+	struct ether_addr   BSSID;
+	uint8           preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+	uint32  npmkid_cand;
+	pmkid_cand_t    pmkid_cand[1];
+} pmkid_cand_list_t;
+
+typedef struct wl_assoc_info {
+	uint32      req_len;
+	uint32      resp_len;
+	uint32      flags;
+	struct dot11_assoc_req req;
+	struct ether_addr reassoc_bssid;
+	struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01
+
+
+
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+
+typedef struct {
+	uint32  val;
+	struct ether_addr ea;
+} scb_val_t;
+
+
+typedef struct {
+	uint32 code;
+	scb_val_t ioctl_args;
+} authops_t;
+
+
+typedef struct channel_info {
+	int hw_channel;
+	int target_channel;
+	int scan_channel;
+} channel_info_t;
+
+
+struct maclist {
+	uint count;         
+	struct ether_addr ea[1];    
+};
+
+
+typedef struct get_pktcnt {
+	uint rx_good_pkt;
+	uint rx_bad_pkt;
+	uint tx_good_pkt;
+	uint tx_bad_pkt;
+	uint rx_ocast_good_pkt; 
+} get_pktcnt_t;
+
+#define WL_IOCTL_ACTION_GET				0x0
+#define WL_IOCTL_ACTION_SET				0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK	0x1e
+#define WL_IOCTL_ACTION_OVL_RSV			0x20
+#define WL_IOCTL_ACTION_OVL				0x40
+#define WL_IOCTL_ACTION_MASK			0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT		1
+
+
+typedef struct wl_ioctl {
+	uint cmd;   
+	void *buf;  
+	uint len;   
+	uint8 set;		
+	uint used;  
+	uint needed;    
+} wl_ioctl_t;
+
+
+#define ioctl_subtype   set     
+#define ioctl_pid   used        
+#define ioctl_status    needed      
+
+
+typedef struct wlc_rev_info {
+	uint        vendorid;   
+	uint        deviceid;   
+	uint        radiorev;   
+	uint        chiprev;    
+	uint        corerev;    
+	uint        boardid;    
+	uint        boardvendor;    
+	uint        boardrev;   
+	uint        driverrev;  
+	uint        ucoderev;   
+	uint        bus;        
+	uint        chipnum;    
+	uint        phytype;    
+	uint        phyrev;     
+	uint        anarev;     
+	uint        chippkg;    
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH   48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+	uint instance;
+	char brand[WL_BRAND_MAX];
+} wl_instance_info_t;
+
+
+typedef struct wl_txfifo_sz {
+	uint16  magic;
+	uint16  fifo;
+	uint16  size;
+} wl_txfifo_sz_t;
+
+#define WL_TXFIFO_SZ_MAGIC  0xa5a5
+
+
+
+#define WLC_IOV_NAME_LEN 30
+typedef struct wlc_iov_trx_s {
+	uint8 module;
+	uint8 type;
+	char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+
+#define WLC_IOCTL_MAGIC     0x14e46c77
+
+
+#define WLC_IOCTL_VERSION   1
+
+#define WLC_IOCTL_MAXLEN        8192    
+#define WLC_IOCTL_SMLEN         256 
+#define WLC_IOCTL_MEDLEN        1536    
+#ifdef WLC_HIGH_ONLY
+#define WLC_SAMPLECOLLECT_MAXLEN    1024    
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN    10240   
+#endif 
+
+
+#define WLC_GET_MAGIC               0
+#define WLC_GET_VERSION             1
+#define WLC_UP                  2
+#define WLC_DOWN                3
+#define WLC_GET_LOOP                4
+#define WLC_SET_LOOP                5
+#define WLC_DUMP                6
+#define WLC_GET_MSGLEVEL            7
+#define WLC_SET_MSGLEVEL            8
+#define WLC_GET_PROMISC             9
+#define WLC_SET_PROMISC             10
+#define WLC_OVERLAY_IOCTL           11
+#define WLC_GET_RATE                12
+ 
+#define WLC_GET_INSTANCE            14
+ 
+ 
+ 
+ 
+#define WLC_GET_INFRA               19
+#define WLC_SET_INFRA               20
+#define WLC_GET_AUTH                21
+#define WLC_SET_AUTH                22
+#define WLC_GET_BSSID               23
+#define WLC_SET_BSSID               24
+#define WLC_GET_SSID                25
+#define WLC_SET_SSID                26
+#define WLC_RESTART             27
+ 
+#define WLC_GET_CHANNEL             29
+#define WLC_SET_CHANNEL             30
+#define WLC_GET_SRL             31
+#define WLC_SET_SRL             32
+#define WLC_GET_LRL             33
+#define WLC_SET_LRL             34
+#define WLC_GET_PLCPHDR             35
+#define WLC_SET_PLCPHDR             36
+#define WLC_GET_RADIO               37
+#define WLC_SET_RADIO               38
+#define WLC_GET_PHYTYPE             39
+#define WLC_DUMP_RATE               40
+#define WLC_SET_RATE_PARAMS         41
+#define WLC_GET_FIXRATE             42
+#define WLC_SET_FIXRATE             43
+ 
+ 
+#define WLC_GET_KEY             44
+#define WLC_SET_KEY             45
+#define WLC_GET_REGULATORY          46
+#define WLC_SET_REGULATORY          47
+#define WLC_GET_PASSIVE_SCAN            48
+#define WLC_SET_PASSIVE_SCAN            49
+#define WLC_SCAN                50
+#define WLC_SCAN_RESULTS            51
+#define WLC_DISASSOC                52
+#define WLC_REASSOC             53
+#define WLC_GET_ROAM_TRIGGER            54
+#define WLC_SET_ROAM_TRIGGER            55
+#define WLC_GET_ROAM_DELTA          56
+#define WLC_SET_ROAM_DELTA          57
+#define WLC_GET_ROAM_SCAN_PERIOD        58
+#define WLC_SET_ROAM_SCAN_PERIOD        59
+#define WLC_EVM                 60  
+#define WLC_GET_TXANT               61
+#define WLC_SET_TXANT               62
+#define WLC_GET_ANTDIV              63
+#define WLC_SET_ANTDIV              64
+ 
+ 
+#define WLC_GET_CLOSED              67
+#define WLC_SET_CLOSED              68
+#define WLC_GET_MACLIST             69
+#define WLC_SET_MACLIST             70
+#define WLC_GET_RATESET             71
+#define WLC_SET_RATESET             72
+ 
+#define WLC_LONGTRAIN               74
+#define WLC_GET_BCNPRD              75
+#define WLC_SET_BCNPRD              76
+#define WLC_GET_DTIMPRD             77
+#define WLC_SET_DTIMPRD             78
+#define WLC_GET_SROM                79
+#define WLC_SET_SROM                80
+#define WLC_GET_WEP_RESTRICT            81
+#define WLC_SET_WEP_RESTRICT            82
+#define WLC_GET_COUNTRY             83
+#define WLC_SET_COUNTRY             84
+#define WLC_GET_PM              85
+#define WLC_SET_PM              86
+#define WLC_GET_WAKE                87
+#define WLC_SET_WAKE                88
+ 
+#define WLC_GET_FORCELINK           90  
+#define WLC_SET_FORCELINK           91  
+#define WLC_FREQ_ACCURACY           92  
+#define WLC_CARRIER_SUPPRESS            93  
+#define WLC_GET_PHYREG              94
+#define WLC_SET_PHYREG              95
+#define WLC_GET_RADIOREG            96
+#define WLC_SET_RADIOREG            97
+#define WLC_GET_REVINFO             98
+#define WLC_GET_UCANTDIV            99
+#define WLC_SET_UCANTDIV            100
+#define WLC_R_REG               101
+#define WLC_W_REG               102
+
+ 
+#define WLC_GET_MACMODE             105
+#define WLC_SET_MACMODE             106
+#define WLC_GET_MONITOR             107
+#define WLC_SET_MONITOR             108
+#define WLC_GET_GMODE               109
+#define WLC_SET_GMODE               110
+#define WLC_GET_LEGACY_ERP          111
+#define WLC_SET_LEGACY_ERP          112
+#define WLC_GET_RX_ANT              113
+#define WLC_GET_CURR_RATESET            114 
+#define WLC_GET_SCANSUPPRESS            115
+#define WLC_SET_SCANSUPPRESS            116
+#define WLC_GET_AP              117
+#define WLC_SET_AP              118
+#define WLC_GET_EAP_RESTRICT            119
+#define WLC_SET_EAP_RESTRICT            120
+#define WLC_SCB_AUTHORIZE           121
+#define WLC_SCB_DEAUTHORIZE         122
+#define WLC_GET_WDSLIST             123
+#define WLC_SET_WDSLIST             124
+#define WLC_GET_ATIM                125
+#define WLC_SET_ATIM                126
+#define WLC_GET_RSSI                127
+#define WLC_GET_PHYANTDIV           128
+#define WLC_SET_PHYANTDIV           129
+#define WLC_AP_RX_ONLY              130
+#define WLC_GET_TX_PATH_PWR         131
+#define WLC_SET_TX_PATH_PWR         132
+#define WLC_GET_WSEC                133
+#define WLC_SET_WSEC                134
+#define WLC_GET_PHY_NOISE           135
+#define WLC_GET_BSS_INFO            136
+#define WLC_GET_PKTCNTS             137
+#define WLC_GET_LAZYWDS             138
+#define WLC_SET_LAZYWDS             139
+#define WLC_GET_BANDLIST            140
+#define WLC_GET_BAND                141
+#define WLC_SET_BAND                142
+#define WLC_SCB_DEAUTHENTICATE          143
+#define WLC_GET_SHORTSLOT           144
+#define WLC_GET_SHORTSLOT_OVERRIDE      145
+#define WLC_SET_SHORTSLOT_OVERRIDE      146
+#define WLC_GET_SHORTSLOT_RESTRICT      147
+#define WLC_SET_SHORTSLOT_RESTRICT      148
+#define WLC_GET_GMODE_PROTECTION        149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE   150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE   151
+#define WLC_UPGRADE             152
+ 
+ 
+#define WLC_GET_IGNORE_BCNS         155
+#define WLC_SET_IGNORE_BCNS         156
+#define WLC_GET_SCB_TIMEOUT         157
+#define WLC_SET_SCB_TIMEOUT         158
+#define WLC_GET_ASSOCLIST           159
+#define WLC_GET_CLK             160
+#define WLC_SET_CLK             161
+#define WLC_GET_UP              162
+#define WLC_OUT                 163
+#define WLC_GET_WPA_AUTH            164
+#define WLC_SET_WPA_AUTH            165
+#define WLC_GET_UCFLAGS             166
+#define WLC_SET_UCFLAGS             167
+#define WLC_GET_PWRIDX              168
+#define WLC_SET_PWRIDX              169
+#define WLC_GET_TSSI                170
+#define WLC_GET_SUP_RATESET_OVERRIDE        171
+#define WLC_SET_SUP_RATESET_OVERRIDE        172
+ 
+ 
+ 
+ 
+ 
+#define WLC_GET_PROTECTION_CONTROL      178
+#define WLC_SET_PROTECTION_CONTROL      179
+#define WLC_GET_PHYLIST             180
+#define WLC_ENCRYPT_STRENGTH            181 
+#define WLC_DECRYPT_STATUS          182 
+#define WLC_GET_KEY_SEQ             183
+#define WLC_GET_SCAN_CHANNEL_TIME       184
+#define WLC_SET_SCAN_CHANNEL_TIME       185
+#define WLC_GET_SCAN_UNASSOC_TIME       186
+#define WLC_SET_SCAN_UNASSOC_TIME       187
+#define WLC_GET_SCAN_HOME_TIME          188
+#define WLC_SET_SCAN_HOME_TIME          189
+#define WLC_GET_SCAN_NPROBES            190
+#define WLC_SET_SCAN_NPROBES            191
+#define WLC_GET_PRB_RESP_TIMEOUT        192
+#define WLC_SET_PRB_RESP_TIMEOUT        193
+#define WLC_GET_ATTEN               194
+#define WLC_SET_ATTEN               195
+#define WLC_GET_SHMEM               196 
+#define WLC_SET_SHMEM               197 
+ 
+ 
+#define WLC_SET_WSEC_TEST           200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON   201
+#define WLC_TKIP_COUNTERMEASURES        202
+#define WLC_GET_PIOMODE             203
+#define WLC_SET_PIOMODE             204
+#define WLC_SET_ASSOC_PREFER            205
+#define WLC_GET_ASSOC_PREFER            206
+#define WLC_SET_ROAM_PREFER         207
+#define WLC_GET_ROAM_PREFER         208
+#define WLC_SET_LED             209
+#define WLC_GET_LED             210
+#define WLC_GET_INTERFERENCE_MODE       211
+#define WLC_SET_INTERFERENCE_MODE       212
+#define WLC_GET_CHANNEL_QA          213
+#define WLC_START_CHANNEL_QA            214
+#define WLC_GET_CHANNEL_SEL         215
+#define WLC_START_CHANNEL_SEL           216
+#define WLC_GET_VALID_CHANNELS          217
+#define WLC_GET_FAKEFRAG            218
+#define WLC_SET_FAKEFRAG            219
+#define WLC_GET_PWROUT_PERCENTAGE       220
+#define WLC_SET_PWROUT_PERCENTAGE       221
+#define WLC_SET_BAD_FRAME_PREEMPT       222
+#define WLC_GET_BAD_FRAME_PREEMPT       223
+#define WLC_SET_LEAP_LIST           224
+#define WLC_GET_LEAP_LIST           225
+#define WLC_GET_CWMIN               226
+#define WLC_SET_CWMIN               227
+#define WLC_GET_CWMAX               228
+#define WLC_SET_CWMAX               229
+#define WLC_GET_WET             230
+#define WLC_SET_WET             231
+#define WLC_GET_PUB             232
+ 
+ 
+#define WLC_GET_KEY_PRIMARY         235
+#define WLC_SET_KEY_PRIMARY         236
+ 
+#define WLC_GET_ACI_ARGS            238
+#define WLC_SET_ACI_ARGS            239
+#define WLC_UNSET_CALLBACK          240
+#define WLC_SET_CALLBACK            241
+#define WLC_GET_RADAR               242
+#define WLC_SET_RADAR               243
+#define WLC_SET_SPECT_MANAGMENT         244
+#define WLC_GET_SPECT_MANAGMENT         245
+#define WLC_WDS_GET_REMOTE_HWADDR       246 
+#define WLC_WDS_GET_WPA_SUP         247
+#define WLC_SET_CS_SCAN_TIMER           248
+#define WLC_GET_CS_SCAN_TIMER           249
+#define WLC_MEASURE_REQUEST         250
+#define WLC_INIT                251
+#define WLC_SEND_QUIET              252
+#define WLC_KEEPALIVE           253
+#define WLC_SEND_PWR_CONSTRAINT         254
+#define WLC_UPGRADE_STATUS          255
+#define WLC_CURRENT_PWR             256
+#define WLC_GET_SCAN_PASSIVE_TIME       257
+#define WLC_SET_SCAN_PASSIVE_TIME       258
+#define WLC_LEGACY_LINK_BEHAVIOR        259
+#define WLC_GET_CHANNELS_IN_COUNTRY     260
+#define WLC_GET_COUNTRY_LIST            261
+#define WLC_GET_VAR             262 
+#define WLC_SET_VAR             263 
+#define WLC_NVRAM_GET               264 
+#define WLC_NVRAM_SET               265
+#define WLC_NVRAM_DUMP              266
+#define WLC_REBOOT              267
+#define WLC_SET_WSEC_PMK            268
+#define WLC_GET_AUTH_MODE           269
+#define WLC_SET_AUTH_MODE           270
+#define WLC_GET_WAKEENTRY           271
+#define WLC_SET_WAKEENTRY           272
+#define WLC_NDCONFIG_ITEM           273 
+#define WLC_NVOTPW              274
+#define WLC_OTPW                275
+#define WLC_IOV_BLOCK_GET           276
+#define WLC_IOV_MODULES_GET         277
+#define WLC_SOFT_RESET              278
+#define WLC_GET_ALLOW_MODE          279
+#define WLC_SET_ALLOW_MODE          280
+#define WLC_GET_DESIRED_BSSID           281
+#define WLC_SET_DESIRED_BSSID           282
+#define WLC_DISASSOC_MYAP           283
+#define WLC_GET_NBANDS              284 
+#define WLC_GET_BANDSTATES          285 
+#define WLC_GET_WLC_BSS_INFO            286 
+#define WLC_GET_ASSOC_INFO          287 
+#define WLC_GET_OID_PHY             288 
+#define WLC_SET_OID_PHY             289 
+#define WLC_SET_ASSOC_TIME          290 
+#define WLC_GET_DESIRED_SSID            291 
+#define WLC_GET_CHANSPEC            292 
+#define WLC_GET_ASSOC_STATE         293 
+#define WLC_SET_PHY_STATE           294 
+#define WLC_GET_SCAN_PENDING            295 
+#define WLC_GET_SCANREQ_PENDING         296 
+#define WLC_GET_PREV_ROAM_REASON        297 
+#define WLC_SET_PREV_ROAM_REASON        298 
+#define WLC_GET_BANDSTATES_PI           299 
+#define WLC_GET_PHY_STATE           300 
+#define WLC_GET_BSS_WPA_RSN         301 
+#define WLC_GET_BSS_WPA2_RSN            302 
+#define WLC_GET_BSS_BCN_TS          303 
+#define WLC_GET_INT_DISASSOC            304 
+#define WLC_SET_NUM_PEERS           305     
+#define WLC_GET_NUM_BSS             306 
+#define WLC_NPHY_SAMPLE_COLLECT         307 
+#define WLC_UM_PRIV             308 
+#define WLC_GET_CMD             309
+  
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE  311 
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE  312 
+#define WLC_GET_WAI_RESTRICT            313 
+#define WLC_SET_WAI_RESTRICT            314 
+#define WLC_SET_WAI_REKEY           315 
+#define WLC_SET_PEAKRATE            316 
+#define WLC_GET_PEAKRATE            317
+#define WLC_LAST                318
+
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE      0xABADCEDE
+#endif
+
+
+#define CMN_IOCTL_OFF 0x180
+
+
+
+
+#define WL_OID_BASE     0xFFE41420
+
+
+#define OID_WL_GETINSTANCE  (WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK    (WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK    (WL_OID_BASE + WLC_SET_FORCELINK)
+#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS   (WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM    (WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+
+#define OID_STA_CHANSPEC    (WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS      (WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY     (WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY     (WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME  (WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID    (WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE   (WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING    (WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE   (WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC    (WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS   (WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS)
+
+#define WL_DECRYPT_STATUS_SUCCESS   1
+#define WL_DECRYPT_STATUS_FAILURE   2
+#define WL_DECRYPT_STATUS_UNKNOWN   3
+
+
+#define WLC_UPGRADE_SUCCESS         0
+#define WLC_UPGRADE_PENDING         1
+
+#ifdef CONFIG_USBRNDIS_RETAIL
+
+typedef struct {
+	char *name;
+	void *param;
+} ndconfig_item_t;
+#endif
+
+
+
+#define WL_AUTH_OPEN_SYSTEM     0   
+#define WL_AUTH_SHARED_KEY      1   
+#define WL_AUTH_OPEN_SHARED     2   
+
+
+#define WL_RADIO_SW_DISABLE     (1<<0)
+#define WL_RADIO_HW_DISABLE     (1<<1)
+#define WL_RADIO_MPC_DISABLE        (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE    (1<<3)  
+
+#define WL_SPURAVOID_OFF    0
+#define WL_SPURAVOID_ON1    1
+#define WL_SPURAVOID_ON2    2
+
+
+#define WL_TXPWR_OVERRIDE   (1U<<31)
+#define WL_TXPWR_NEG   (1U<<30)
+
+#define WL_PHY_PAVARS_LEN   6   
+
+#define WL_PHY_PAVARS2_NUM	3	
+#define WL_PHY_PAVAR_VER	1	
+typedef struct wl_pavars2 {
+	uint16 ver;		
+	uint16 len;		
+	uint16 inuse;		
+	uint16 phy_type;	
+	uint16 bandrange;
+	uint16 chain;
+	uint16 inpa[WL_PHY_PAVARS2_NUM];	
+} wl_pavars2_t;
+
+typedef struct wl_po {
+	uint16  phy_type;   
+	uint16  band;
+	uint16  cckpo;
+	uint32  ofdmpo;
+	uint16  mcspo[8];
+} wl_po_t;
+
+
+#define WLC_TXPWR_MAX       (127)   
+
+
+#define WL_DIAG_INTERRUPT           1   
+#define WL_DIAG_LOOPBACK            2   
+#define WL_DIAG_MEMORY              3   
+#define WL_DIAG_LED             4   
+#define WL_DIAG_REG             5   
+#define WL_DIAG_SROM                6   
+#define WL_DIAG_DMA             7   
+
+#define WL_DIAGERR_SUCCESS          0
+#define WL_DIAGERR_FAIL_TO_RUN          1   
+#define WL_DIAGERR_NOT_SUPPORTED        2   
+#define WL_DIAGERR_INTERRUPT_FAIL       3   
+#define WL_DIAGERR_LOOPBACK_FAIL        4   
+#define WL_DIAGERR_SROM_FAIL            5   
+#define WL_DIAGERR_SROM_BADCRC          6   
+#define WL_DIAGERR_REG_FAIL         7   
+#define WL_DIAGERR_MEMORY_FAIL          8   
+#define WL_DIAGERR_NOMEM            9   
+#define WL_DIAGERR_DMA_FAIL         10  
+
+#define WL_DIAGERR_MEMORY_TIMEOUT       11  
+#define WL_DIAGERR_MEMORY_BADPATTERN        12  
+
+
+#define WLC_BAND_AUTO       0   
+#define WLC_BAND_5G     1   
+#define WLC_BAND_2G     2   
+#define WLC_BAND_ALL        3   
+
+
+#define WL_CHAN_FREQ_RANGE_2G      0
+#define WL_CHAN_FREQ_RANGE_5GL     1
+#define WL_CHAN_FREQ_RANGE_5GM     2
+#define WL_CHAN_FREQ_RANGE_5GH     3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_VER2    4
+#define WL_CHAN_FREQ_RANGE_5GLH_VER2    5
+#define WL_CHAN_FREQ_RANGE_5GML_VER2    6
+#define WL_CHAN_FREQ_RANGE_5GMH_VER2    7
+#define WL_CHAN_FREQ_RANGE_5GH_VER2     8
+
+
+#define WLC_PHY_TYPE_A      0
+#define WLC_PHY_TYPE_B      1
+#define WLC_PHY_TYPE_G      2
+#define WLC_PHY_TYPE_N      4
+#define WLC_PHY_TYPE_LP     5
+#define WLC_PHY_TYPE_SSN    6
+#define WLC_PHY_TYPE_HT     7
+#define WLC_PHY_TYPE_LCN    8
+#define WLC_PHY_TYPE_NULL   0xf
+
+
+#define WLC_MACMODE_DISABLED    0   
+#define WLC_MACMODE_DENY    1   
+#define WLC_MACMODE_ALLOW   2   
+
+
+#define GMODE_LEGACY_B      0
+#define GMODE_AUTO      1
+#define GMODE_ONLY      2
+#define GMODE_B_DEFERRED    3
+#define GMODE_PERFORMANCE   4
+#define GMODE_LRS       5
+#define GMODE_MAX       6
+
+
+#define WLC_PLCP_AUTO   -1
+#define WLC_PLCP_SHORT  0
+#define WLC_PLCP_LONG   1
+
+
+#define WLC_PROTECTION_AUTO     -1
+#define WLC_PROTECTION_OFF      0
+#define WLC_PROTECTION_ON       1
+#define WLC_PROTECTION_MMHDR_ONLY   2
+#define WLC_PROTECTION_CTS_ONLY     3
+
+
+#define WLC_PROTECTION_CTL_OFF      0
+#define WLC_PROTECTION_CTL_LOCAL    1
+#define WLC_PROTECTION_CTL_OVERLAP  2
+
+
+#define WLC_N_PROTECTION_OFF        0
+#define WLC_N_PROTECTION_OPTIONAL   1
+#define WLC_N_PROTECTION_20IN40     2
+#define WLC_N_PROTECTION_MIXEDMODE  3
+
+
+#define WLC_N_PREAMBLE_MIXEDMODE    0
+#define WLC_N_PREAMBLE_GF       1
+#define WLC_N_PREAMBLE_GF_BRCM          2
+
+
+#define WLC_N_BW_20ALL          0
+#define WLC_N_BW_40ALL          1
+#define WLC_N_BW_20IN2G_40IN5G      2
+
+
+#define WLC_N_TXRX_CHAIN0       0
+#define WLC_N_TXRX_CHAIN1       1
+
+
+#define WLC_N_SGI_20            0x01
+#define WLC_N_SGI_40            0x02
+
+
+#define PM_OFF  0
+#define PM_MAX  1
+#define PM_FAST 2
+
+#define LISTEN_INTERVAL		10
+
+#define INTERFERE_OVRRIDE_OFF   -1  
+#define INTERFERE_NONE  0   
+#define NON_WLAN    1   
+#define WLAN_MANUAL 2   
+#define WLAN_AUTO   3   
+#define WLAN_AUTO_W_NOISE   4   
+#define AUTO_ACTIVE (1 << 7) 
+
+typedef struct wl_aci_args {
+	int enter_aci_thresh; 
+	int exit_aci_thresh; 
+	int usec_spin; 
+	int glitch_delay; 
+	uint16 nphy_adcpwr_enter_thresh;    
+	uint16 nphy_adcpwr_exit_thresh; 
+	uint16 nphy_repeat_ctr;     
+	uint16 nphy_num_samples;    
+	uint16 nphy_undetect_window_sz; 
+	uint16 nphy_b_energy_lo_aci;    
+	uint16 nphy_b_energy_md_aci;    
+	uint16 nphy_b_energy_hi_aci;    
+	uint16 nphy_noise_noassoc_glitch_th_up; 
+	uint16 nphy_noise_noassoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_glitch_th_up;
+	uint16 nphy_noise_assoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_aci_glitch_th_up;
+	uint16 nphy_noise_assoc_aci_glitch_th_dn;
+	uint16 nphy_noise_assoc_enter_th;
+	uint16 nphy_noise_noassoc_enter_th;
+	uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+	uint16 nphy_noise_noassoc_crsidx_incr;
+	uint16 nphy_noise_assoc_crsidx_incr;
+	uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define TRIGGER_NOW             0
+#define TRIGGER_CRS             0x01
+#define TRIGGER_CRSDEASSERT         0x02
+#define TRIGGER_GOODFCS             0x04
+#define TRIGGER_BADFCS              0x08
+#define TRIGGER_BADPLCP             0x10
+#define TRIGGER_CRSGLITCH           0x20
+#define WL_ACI_ARGS_LEGACY_LENGTH   16  
+#define WL_SAMPLECOLLECT_T_VERSION  1   
+typedef struct wl_samplecollect_args {
+	
+	uint8 coll_us;
+	int cores;
+	
+	uint16 version;     
+	uint16 length;      
+	uint8 trigger;
+	uint16 timeout;
+	uint16 mode;
+	uint32 pre_dur;
+	uint32 post_dur;
+	uint8 gpio_sel;
+	bool downsamp;
+	bool be_deaf;
+	bool agc;       
+	bool filter;        
+} wl_samplecollect_args_t;
+
+#define WL_SAMPLEDATA_HEADER_TYPE   1
+#define WL_SAMPLEDATA_HEADER_SIZE   80  
+#define WL_SAMPLEDATA_TYPE      2
+#define WL_SAMPLEDATA_SEQ       0xff    
+#define WL_SAMPLEDATA_MORE_DATA     0x100   
+#define WL_SAMPLEDATA_T_VERSION     1   
+
+#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+	uint16 version; 
+	uint16 size;    
+	uint16 tag; 
+	uint16 length;  
+	uint32 flag;    
+} wl_sampledata_t;
+
+
+
+#define WL_ERROR_VAL        0x00000001
+#define WL_TRACE_VAL        0x00000002
+#define WL_PRHDRS_VAL       0x00000004
+#define WL_PRPKT_VAL        0x00000008
+#define WL_INFORM_VAL       0x00000010
+#define WL_TMP_VAL      0x00000020
+#define WL_OID_VAL      0x00000040
+#define WL_RATE_VAL     0x00000080
+#define WL_ASSOC_VAL        0x00000100
+#define WL_PRUSR_VAL        0x00000200
+#define WL_PS_VAL       0x00000400
+#define WL_TXPWR_VAL        0x00000800  
+#define WL_PORT_VAL     0x00001000
+#define WL_DUAL_VAL     0x00002000
+#define WL_WSEC_VAL     0x00004000
+#define WL_WSEC_DUMP_VAL    0x00008000
+#define WL_LOG_VAL      0x00010000
+#define WL_NRSSI_VAL        0x00020000  
+#define WL_LOFT_VAL     0x00040000  
+#define WL_REGULATORY_VAL   0x00080000
+#define WL_PHYCAL_VAL       0x00100000  
+#define WL_RADAR_VAL        0x00200000  
+#define WL_MPC_VAL      0x00400000
+#define WL_APSTA_VAL        0x00800000
+#define WL_DFS_VAL      0x01000000
+#define WL_BA_VAL       0x02000000
+#define WL_ACI_VAL      0x04000000
+#define WL_MBSS_VAL     0x04000000
+#define WL_CAC_VAL      0x08000000
+#define WL_AMSDU_VAL        0x10000000
+#define WL_AMPDU_VAL        0x20000000
+#define WL_FFPLD_VAL        0x40000000
+
+
+#define WL_DPT_VAL      0x00000001
+#define WL_SCAN_VAL     0x00000002
+#define WL_WOWL_VAL     0x00000004
+#define WL_COEX_VAL     0x00000008
+#define WL_RTDC_VAL     0x00000010
+#define WL_PROTO_VAL        0x00000020
+#define WL_BTA_VAL      0x00000040
+#define WL_CHANINT_VAL      0x00000080
+#define WL_THERMAL_VAL      0x00000100  
+#define WL_P2P_VAL      0x00000200
+#define WL_TXRX_VAL		0x00000400
+#define WL_MCHAN_VAL            0x00000800
+
+
+#define WL_LED_NUMGPIO      16  
+
+
+#define WL_LED_OFF      0       
+#define WL_LED_ON       1       
+#define WL_LED_ACTIVITY     2       
+#define WL_LED_RADIO        3       
+#define WL_LED_ARADIO       4       
+#define WL_LED_BRADIO       5       
+#define WL_LED_BGMODE       6       
+#define WL_LED_WI1      7
+#define WL_LED_WI2      8
+#define WL_LED_WI3      9
+#define WL_LED_ASSOC        10      
+#define WL_LED_INACTIVE     11      
+#define WL_LED_ASSOCACT     12      
+#define WL_LED_WI4      13
+#define WL_LED_WI5      14
+#define WL_LED_BLINKSLOW    15      
+#define WL_LED_BLINKMED     16      
+#define WL_LED_BLINKFAST    17      
+#define WL_LED_BLINKCUSTOM  18      
+#define WL_LED_BLINKPERIODIC    19      
+#define WL_LED_ASSOC_WITH_SEC   20      
+						
+#define WL_LED_START_OFF    21      
+#define WL_LED_NUMBEHAVIOR  22
+
+
+#define WL_LED_BEH_MASK     0x7f        
+#define WL_LED_AL_MASK      0x80        
+
+
+#define WL_NUMCHANNELS      64
+#define WL_NUMCHANSPECS     100
+
+
+#define WL_WDS_WPA_ROLE_AUTH    0   
+#define WL_WDS_WPA_ROLE_SUP 1   
+#define WL_WDS_WPA_ROLE_AUTO    255 
+
+
+#define WL_EVENTING_MASK_LEN    16
+
+
+
+
+#define WL_JOIN_PREF_RSSI   1   
+#define WL_JOIN_PREF_WPA    2   
+#define WL_JOIN_PREF_BAND   3   
+#define WL_JOIN_PREF_RSSI_DELTA 4   
+
+
+#define WLJP_BAND_ASSOC_PREF    255 
+
+
+#define WL_WPA_ACP_MCS_ANY  "\x00\x00\x00\x00"
+
+struct tsinfo_arg {
+	uint8 octets[3];
+};
+
+#define NFIFO           6   
+
+#define WL_CNT_T_VERSION    6   
+
+typedef struct {
+	uint16  version;    
+	uint16  length;     
+
+	
+	uint32  txframe;    
+	uint32  txbyte;     
+	uint32  txretrans;  
+	uint32  txerror;    
+	uint32  txctl;      
+	uint32  txprshort;  
+	uint32  txserr;     
+	uint32  txnobuf;    
+	uint32  txnoassoc;  
+	uint32  txrunt;     
+	uint32  txchit;     
+	uint32  txcmiss;    
+
+	
+	uint32  txuflo;     
+	uint32  txphyerr;   
+	uint32  txphycrs;
+
+	
+	uint32  rxframe;    
+	uint32  rxbyte;     
+	uint32  rxerror;    
+	uint32  rxctl;      
+	uint32  rxnobuf;    
+	uint32  rxnondata;  
+	uint32  rxbadds;    
+	uint32  rxbadcm;    
+	uint32  rxfragerr;  
+	uint32  rxrunt;     
+	uint32  rxgiant;    
+	uint32  rxnoscb;    
+	uint32  rxbadproto; 
+	uint32  rxbadsrcmac;    
+	uint32  rxbadda;    
+	uint32  rxfilter;   
+
+	
+	uint32  rxoflo;     
+	uint32  rxuflo[NFIFO];  
+
+	uint32  d11cnt_txrts_off;   
+	uint32  d11cnt_rxcrc_off;   
+	uint32  d11cnt_txnocts_off; 
+
+	
+	uint32  dmade;      
+	uint32  dmada;      
+	uint32  dmape;      
+	uint32  reset;      
+	uint32  tbtt;       
+	uint32  txdmawar;
+	uint32  pkt_callback_reg_fail;  
+
+	
+	uint32  txallfrm;   
+	uint32  txrtsfrm;   
+	uint32  txctsfrm;   
+	uint32  txackfrm;   
+	uint32  txdnlfrm;   
+	uint32  txbcnfrm;   
+	uint32  txfunfl[8]; 
+	uint32  txtplunfl;  
+	uint32  txphyerror; 
+	uint32  rxfrmtoolong;   
+	uint32  rxfrmtooshrt;   
+	uint32  rxinvmachdr;    
+	uint32  rxbadfcs;   
+	uint32  rxbadplcp;  
+	uint32  rxcrsglitch;    
+	uint32  rxstrt;     
+	uint32  rxdfrmucastmbss; 
+	uint32  rxmfrmucastmbss; 
+	uint32  rxcfrmucast;    
+	uint32  rxrtsucast; 
+	uint32  rxctsucast; 
+	uint32  rxackucast; 
+	uint32  rxdfrmocast;    
+	uint32  rxmfrmocast;    
+	uint32  rxcfrmocast;    
+	uint32  rxrtsocast; 
+	uint32  rxctsocast; 
+	uint32  rxdfrmmcast;    
+	uint32  rxmfrmmcast;    
+	uint32  rxcfrmmcast;    
+	uint32  rxbeaconmbss;   
+	uint32  rxdfrmucastobss; 
+	uint32  rxbeaconobss;   
+	uint32  rxrsptmout; 
+	uint32  bcntxcancl; 
+	uint32  rxf0ovfl;   
+	uint32  rxf1ovfl;   
+	uint32  rxf2ovfl;   
+	uint32  txsfovfl;   
+	uint32  pmqovfl;    
+	uint32  rxcgprqfrm; 
+	uint32  rxcgprsqovfl;   
+	uint32  txcgprsfail;    
+	uint32  txcgprssuc; 
+	uint32  prs_timeout;    
+	uint32  rxnack;
+	uint32  frmscons;
+	uint32  txnack;
+	uint32  txglitch_nack;  
+	uint32  txburst;    
+
+	
+	uint32  txfrag;     
+	uint32  txmulti;    
+	uint32  txfail;     
+	uint32  txretry;    
+	uint32  txretrie;   
+	uint32  rxdup;      
+	uint32  txrts;      
+	uint32  txnocts;    
+	uint32  txnoack;    
+	uint32  rxfrag;     
+	uint32  rxmulti;    
+	uint32  rxcrc;      
+	uint32  txfrmsnt;   
+	uint32  rxundec;    
+
+	
+	uint32  tkipmicfaill;   
+	uint32  tkipcntrmsr;    
+	uint32  tkipreplay; 
+	uint32  ccmpfmterr; 
+	uint32  ccmpreplay; 
+	uint32  ccmpundec;  
+	uint32  fourwayfail;    
+	uint32  wepundec;   
+	uint32  wepicverr;  
+	uint32  decsuccess; 
+	uint32  tkipicverr; 
+	uint32  wepexcluded;    
+
+	uint32  rxundec_mcst;   
+
+	
+	uint32  tkipmicfaill_mcst;  
+	uint32  tkipcntrmsr_mcst;   
+	uint32  tkipreplay_mcst;    
+	uint32  ccmpfmterr_mcst;    
+	uint32  ccmpreplay_mcst;    
+	uint32  ccmpundec_mcst; 
+	uint32  fourwayfail_mcst;   
+	uint32  wepundec_mcst;  
+	uint32  wepicverr_mcst; 
+	uint32  decsuccess_mcst;    
+	uint32  tkipicverr_mcst;    
+	uint32  wepexcluded_mcst;   
+
+	uint32  txchanrej;  
+	uint32  txexptime;  
+	uint32  psmwds;     
+	uint32  phywatchdog;    
+
+	
+	uint32  prq_entries_handled;    
+	uint32  prq_undirected_entries; 
+	uint32  prq_bad_entries;    
+	uint32  atim_suppress_count;    
+	uint32  bcn_template_not_ready; 
+	uint32  bcn_template_not_ready_done; 
+	uint32  late_tbtt_dpc;  
+
+	
+	uint32  rx1mbps;    
+	uint32  rx2mbps;    
+	uint32  rx5mbps5;   
+	uint32  rx6mbps;    
+	uint32  rx9mbps;    
+	uint32  rx11mbps;   
+	uint32  rx12mbps;   
+	uint32  rx18mbps;   
+	uint32  rx24mbps;   
+	uint32  rx36mbps;   
+	uint32  rx48mbps;   
+	uint32  rx54mbps;   
+	uint32  rx108mbps;  
+	uint32  rx162mbps;  
+	uint32  rx216mbps;  
+	uint32  rx270mbps;  
+	uint32  rx324mbps;  
+	uint32  rx378mbps;  
+	uint32  rx432mbps;  
+	uint32  rx486mbps;  
+	uint32  rx540mbps;  
+
+	
+	uint32  pktengrxducast; 
+	uint32  pktengrxdmcast; 
+
+	uint32  rfdisable;  
+	uint32  bphy_rxcrsglitch;   
+
+	uint32  txmpdu_sgi; 
+	uint32  rxmpdu_sgi; 
+	uint32  txmpdu_stbc;    
+	uint32  rxmpdu_stbc;    
+} wl_cnt_t;
+
+
+#define WL_WME_CNT_VERSION  1   
+
+typedef struct {
+	uint32 packets;
+	uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+	uint16  version;    
+	uint16  length;     
+
+	wl_traffic_stats_t tx[AC_COUNT];    
+	wl_traffic_stats_t tx_failed[AC_COUNT]; 
+	wl_traffic_stats_t rx[AC_COUNT];    
+	wl_traffic_stats_t rx_failed[AC_COUNT]; 
+
+	wl_traffic_stats_t forward[AC_COUNT];   
+
+	wl_traffic_stats_t tx_expired[AC_COUNT];    
+
+} wl_wme_cnt_t;
+
+struct wl_msglevel2 {
+	uint32 low;
+	uint32 high;
+};
+
+
+
+#define WLC_ROAM_TRIGGER_DEFAULT    0 
+#define WLC_ROAM_TRIGGER_BANDWIDTH  1 
+#define WLC_ROAM_TRIGGER_DISTANCE   2 
+#define WLC_ROAM_TRIGGER_AUTO       3 
+#define WLC_ROAM_TRIGGER_MAX_VALUE  3 
+
+
+#define WPA_AUTH_PFN_ANY	0xffffffff	
+
+enum {
+	PFN_LIST_ORDER,
+	PFN_RSSI
+};
+
+enum {
+	DISABLE,
+	ENABLE
+};
+
+enum {
+	OFF_ADAPT,
+	SMART_ADAPT,
+	STRICT_ADAPT
+};
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT	2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT	6
+
+#define SORT_CRITERIA_MASK		0x01
+#define AUTO_NET_SWITCH_MASK	0x02
+#define ENABLE_BKGRD_SCAN_MASK	0x04
+#define IMMEDIATE_SCAN_MASK		0x08
+#define	AUTO_CONNECT_MASK		0x10
+#define ENABLE_BD_SCAN_MASK		0x20
+#define ENABLE_ADAPTSCAN_MASK	0xc0
+
+#define PFN_VERSION				2
+#define PFN_SCANRESULT_VERSION	1
+#define MAX_PFN_LIST_COUNT	16
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP				2
+
+
+typedef struct wl_pfn_subnet_info {
+	struct ether_addr BSSID;
+	uint8	channel; 
+	uint8	SSID_len;
+	uint8	SSID[32];
+} wl_pfn_subnet_info_t;
+
+typedef struct wl_pfn_net_info {
+	wl_pfn_subnet_info_t pfnsubnet;
+	int16	RSSI; 
+	uint16	timestamp; 
+} wl_pfn_net_info_t;
+
+typedef struct wl_pfn_scanresults {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_t netinfo[1];
+} wl_pfn_scanresults_t;
+
+
+typedef struct wl_pfn_param {
+	int32 version;			
+	int32 scan_freq;		
+	int32 lost_network_timeout;	
+	int16 flags;			
+	int16 rssi_margin;		
+	uint8 bestn; 
+	uint8 mscan; 
+	uint8 repeat; 
+	uint8 exp; 
+} wl_pfn_param_t;
+
+typedef struct wl_pfn_bssid {
+	struct ether_addr 	macaddr;
+	
+	uint16				flags;
+} wl_pfn_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+
+typedef struct wl_pfn_cfg {
+	uint32				reporttype;
+	int32				channel_num;
+	uint16				channel_list[WL_NUMCHANNELS];
+} wl_pfn_cfg_t;
+#define WL_PFN_REPORT_ALLNET 	0
+#define WL_PFN_REPORT_SSIDNET 	1
+#define WL_PFN_REPORT_BSSIDNET 	2
+
+typedef struct wl_pfn {
+	wlc_ssid_t		ssid;			
+	int32			flags;			
+	int32			infra;			
+	int32			auth;			
+	int32			wpa_auth;		
+	int32			wsec;			
+} wl_pfn_t;
+#define WL_PFN_HIDDEN_BIT		2
+#define PNO_SCAN_MAX_FW		(508 * 1000)
+#define PNO_SCAN_MAX_FW_SEC	(PNO_SCAN_MAX_FW / 1000)
+#define PNO_SCAN_MIN_FW_SEC	10			
+#define WL_PFN_HIDDEN_MASK		0x4
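+
+/* Editor's illustrative sketch -- not part of the original Broadcom sources.
+ * Shows how the PFN_VERSION, DEFAULT_* and *_BIT values above would combine
+ * into a wl_pfn_param_t. The scan_freq / lost_network_timeout values and
+ * their units are assumptions for illustration only.
+ */
+static inline void wl_pfn_param_sketch(wl_pfn_param_t *pp)
+{
+	pp->version = PFN_VERSION;
+	pp->scan_freq = 60;			/* assumed scan interval */
+	pp->lost_network_timeout = 180;		/* assumed lost-network timeout */
+	pp->flags = (ENABLE << IMMEDIATE_SCAN_BIT);
+	pp->rssi_margin = 0;
+	pp->bestn = DEFAULT_BESTN;
+	pp->mscan = DEFAULT_MSCAN;
+	pp->repeat = DEFAULT_REPEAT;
+	pp->exp = DEFAULT_EXP;
+}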
+
+
+#define TOE_TX_CSUM_OL      0x00000001
+#define TOE_RX_CSUM_OL      0x00000002
+
+
+#define TOE_ERRTEST_TX_CSUM 0x00000001
+#define TOE_ERRTEST_RX_CSUM 0x00000002
+#define TOE_ERRTEST_RX_CSUM2    0x00000004
+
+struct toe_ol_stats_t {
+	
+	uint32 tx_summed;
+
+	
+	uint32 tx_iph_fill;
+	uint32 tx_tcp_fill;
+	uint32 tx_udp_fill;
+	uint32 tx_icmp_fill;
+
+	
+	uint32 rx_iph_good;
+	uint32 rx_iph_bad;
+	uint32 rx_tcp_good;
+	uint32 rx_tcp_bad;
+	uint32 rx_udp_good;
+	uint32 rx_udp_bad;
+	uint32 rx_icmp_good;
+	uint32 rx_icmp_bad;
+
+	
+	uint32 tx_tcp_errinj;
+	uint32 tx_udp_errinj;
+	uint32 tx_icmp_errinj;
+
+	
+	uint32 rx_tcp_errinj;
+	uint32 rx_udp_errinj;
+	uint32 rx_icmp_errinj;
+};
+
+
+#define ARP_OL_AGENT        0x00000001
+#define ARP_OL_SNOOP        0x00000002
+#define ARP_OL_HOST_AUTO_REPLY  0x00000004
+#define ARP_OL_PEER_AUTO_REPLY  0x00000008
+
+
+#define ARP_ERRTEST_REPLY_PEER  0x1
+#define ARP_ERRTEST_REPLY_HOST  0x2
+
+#define ARP_MULTIHOMING_MAX 8   
+
+
+struct arp_ol_stats_t {
+	uint32  host_ip_entries;    
+	uint32  host_ip_overflow;   
+
+	uint32  arp_table_entries;  
+	uint32  arp_table_overflow; 
+
+	uint32  host_request;       
+	uint32  host_reply;     
+	uint32  host_service;       
+
+	uint32  peer_request;       
+	uint32  peer_request_drop;  
+	uint32  peer_reply;     
+	uint32  peer_reply_drop;    
+	uint32  peer_service;       
+};
+
+
+
+
+typedef struct wl_keep_alive_pkt {
+	uint32  period_msec;    
+	uint16  len_bytes;  
+	uint8   data[1];    
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN     OFFSETOF(wl_keep_alive_pkt_t, data)
+
+
+
+
+typedef enum wl_pkt_filter_type {
+	WL_PKT_FILTER_TYPE_PATTERN_MATCH    
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+
+typedef struct wl_pkt_filter_pattern {
+	uint32  offset;     
+	uint32  size_bytes; 
+	uint8   mask_and_pattern[1]; 
+} wl_pkt_filter_pattern_t;
+
+
+typedef struct wl_pkt_filter {
+	uint32  id;     
+	uint32  type;       
+	uint32  negate_match;   
+	union {         
+		wl_pkt_filter_pattern_t pattern;    
+	} u;
+} wl_pkt_filter_t;
+
+#define WL_PKT_FILTER_FIXED_LEN       OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN   OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
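+
+/* Editor's illustrative sketch -- not part of the original Broadcom sources.
+ * Shows how the two FIXED_LEN macros above are typically combined to size a
+ * variable-length pattern filter. The assumption that mask_and_pattern[]
+ * holds the mask followed by the pattern (hence 2 * size_bytes) is for
+ * illustration only.
+ */
+static inline uint32 wl_pkt_filter_total_len_sketch(uint32 size_bytes)
+{
+	return (uint32)(WL_PKT_FILTER_FIXED_LEN +
+	                WL_PKT_FILTER_PATTERN_FIXED_LEN +
+	                2 * size_bytes);
+}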
+
+
+typedef struct wl_pkt_filter_enable {
+	uint32  id;     
+	uint32  enable;     
+} wl_pkt_filter_enable_t;
+
+
+typedef struct wl_pkt_filter_list {
+	uint32  num;        
+	wl_pkt_filter_t filter[1];  
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN      OFFSETOF(wl_pkt_filter_list_t, filter)
+
+
+typedef struct wl_pkt_filter_stats {
+	uint32  num_pkts_matched;   
+	uint32  num_pkts_forwarded; 
+	uint32  num_pkts_discarded; 
+} wl_pkt_filter_stats_t;
+
+
+typedef struct wl_seq_cmd_ioctl {
+	uint32 cmd;     
+	uint32 len;     
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES  4
+
+
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+	(((cmd) == WLC_GET_MAGIC) || \
+	 ((cmd) == WLC_GET_VERSION) || \
+	 ((cmd) == WLC_GET_AP) || \
+	 ((cmd) == WLC_GET_INSTANCE))
+
+
+
+#define WL_PKTENG_PER_TX_START          0x01
+#define WL_PKTENG_PER_TX_STOP           0x02
+#define WL_PKTENG_PER_RX_START          0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START     0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START     0x06
+#define WL_PKTENG_PER_RX_STOP           0x08
+#define WL_PKTENG_PER_MASK          0xff
+
+#define WL_PKTENG_SYNCHRONOUS           0x100   
+
+typedef struct wl_pkteng {
+	uint32 flags;
+	uint32 delay;           
+	uint32 nframes;         
+	uint32 length;          
+	uint8  seqno;           
+	struct ether_addr dest;     
+	struct ether_addr src;      
+} wl_pkteng_t;
+
+#define NUM_80211b_RATES    4
+#define NUM_80211ag_RATES   8
+#define NUM_80211n_RATES    32
+#define NUM_80211_RATES     (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+typedef struct wl_pkteng_stats {
+	uint32 lostfrmcnt;      
+	int32 rssi;         
+	int32 snr;          
+	uint16 rxpktcnt[NUM_80211_RATES+1];
+} wl_pkteng_stats_t;
+
+
+#define WL_WOWL_MAGIC   (1 << 0)    
+#define WL_WOWL_NET (1 << 1)    
+#define WL_WOWL_DIS (1 << 2)    
+#define WL_WOWL_RETR    (1 << 3)    
+#define WL_WOWL_BCN (1 << 4)    
+#define WL_WOWL_TST (1 << 5)    
+#define WL_WOWL_M1      (1 << 6)        
+#define WL_WOWL_EAPID   (1 << 7)        
+#define WL_WOWL_KEYROT  (1 << 14)       
+#define WL_WOWL_BCAST   (1 << 15)   
+
+#define MAGIC_PKT_MINLEN 102    
+
+typedef struct {
+	uint masksize;      
+	uint offset;        
+	uint patternoffset; 
+	uint patternsize;   
+	ulong id;       
+	
+	
+} wl_wowl_pattern_t;
+
+typedef struct {
+	uint            count;
+	wl_wowl_pattern_t   pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct {
+	uint8   pci_wakeind;    
+	uint16  ucode_wakeind;  
+} wl_wowl_wakeind_t;
+
+
+typedef struct wl_txrate_class {
+	uint8       init_rate;
+	uint8       min_rate;
+	uint8       max_rate;
+} wl_txrate_class_t;
+
+
+
+
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT     20  
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN         5   
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX         1000    
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT      10  
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN          10  
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX          1000    
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT    300 
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN        10  
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX        900 
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT  5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN  5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX  100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000   
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT  20  
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN  20  
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX  10000   
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT    25  
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN        0   
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX        100 
+
+
+typedef struct wl_obss_scan_arg {
+	int16   passive_dwell;
+	int16   active_dwell;
+	int16   bss_widthscan_interval;
+	int16   passive_total;
+	int16   active_total;
+	int16   chanwidth_transition_delay;
+	int16   activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN  sizeof(wl_obss_scan_arg_t)
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7  
+
+#define WL_COEX_INFO_MASK       0x07
+#define WL_COEX_INFO_REQ        0x01
+#define WL_COEX_40MHZ_INTOLERANT    0x02
+#define WL_COEX_WIDTH20         0x04
+
+#define WLC_RSSI_INVALID     0  
+
+#define MAX_RSSI_LEVELS 8
+
+
+typedef struct wl_rssi_event {
+	uint32 rate_limit_msec;     
+	uint8 num_rssi_levels;      
+	int8 rssi_levels[MAX_RSSI_LEVELS];  
+} wl_rssi_event_t;
+
+typedef struct wl_action_obss_coex_req {
+	uint8 info;
+	uint8 num;
+	uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+#define EXTLOG_CUR_VER      0x0100
+
+#define MAX_ARGSTR_LEN      18 
+
+
+#define LOG_MODULE_COMMON   0x0001
+#define LOG_MODULE_ASSOC    0x0002
+#define LOG_MODULE_EVENT    0x0004
+#define LOG_MODULE_MAX      3           
+
+
+#define WL_LOG_LEVEL_DISABLE    0
+#define WL_LOG_LEVEL_ERR    1
+#define WL_LOG_LEVEL_WARN   2
+#define WL_LOG_LEVEL_INFO   3
+#define WL_LOG_LEVEL_MAX    WL_LOG_LEVEL_INFO   
+
+
+#define LOG_FLAG_EVENT      1
+
+
+#define LOG_ARGTYPE_NULL    0
+#define LOG_ARGTYPE_STR     1   
+#define LOG_ARGTYPE_INT     2   
+#define LOG_ARGTYPE_INT_STR 3   
+#define LOG_ARGTYPE_STR_INT 4   
+
+typedef struct wlc_extlog_cfg {
+	int max_number;
+	uint16 module;  
+	uint8 level;
+	uint8 flag;
+	uint16 version;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+	uint32 time;
+	uint16 module;
+	uint16 id;
+	uint8 level;
+	uint8 sub_unit;
+	uint8 seq_num;
+	int32 arg;
+	char str[MAX_ARGSTR_LEN];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+	uint32 from_last;
+	uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+	uint16 version;
+	uint16 record_len;
+	uint32 num;
+	log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+	uint16  id;
+	uint16  flag;
+	uint8   arg_type;
+	const char  *fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER        1
+
+
+typedef enum {
+	FMTSTR_DRIVER_UP_ID = 0,
+	FMTSTR_DRIVER_DOWN_ID = 1,
+	FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+	FMTSTR_NO_PROGRESS_ID = 3,
+	FMTSTR_RFDISABLE_ID = 4,
+	FMTSTR_REG_PRINT_ID = 5,
+	FMTSTR_EXPTIME_ID = 6,
+	FMTSTR_JOIN_START_ID = 7,
+	FMTSTR_JOIN_COMPLETE_ID = 8,
+	FMTSTR_NO_NETWORKS_ID = 9,
+	FMTSTR_SECURITY_MISMATCH_ID = 10,
+	FMTSTR_RATE_MISMATCH_ID = 11,
+	FMTSTR_AP_PRUNED_ID = 12,
+	FMTSTR_KEY_INSERTED_ID = 13,
+	FMTSTR_DEAUTH_ID = 14,
+	FMTSTR_DISASSOC_ID = 15,
+	FMTSTR_LINK_UP_ID = 16,
+	FMTSTR_LINK_DOWN_ID = 17,
+	FMTSTR_RADIO_HW_OFF_ID = 18,
+	FMTSTR_RADIO_HW_ON_ID = 19,
+	FMTSTR_EVENT_DESC_ID = 20,
+	FMTSTR_PNP_SET_POWER_ID = 21,
+	FMTSTR_RADIO_SW_OFF_ID = 22,
+	FMTSTR_RADIO_SW_ON_ID = 23,
+	FMTSTR_PWD_MISMATCH_ID = 24,
+	FMTSTR_FATAL_ERROR_ID = 25,
+	FMTSTR_AUTH_FAIL_ID = 26,
+	FMTSTR_ASSOC_FAIL_ID = 27,
+	FMTSTR_IBSS_FAIL_ID = 28,
+	FMTSTR_EXTAP_FAIL_ID = 29,
+	FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+#ifdef DONGLEOVERLAYS
+typedef struct {
+	uint32 flags_idx;	
+	uint32 offset;		
+	uint32 len;			
+	
+} wl_ioctl_overlay_t;
+
+#define OVERLAY_IDX_MASK		0x000000ff
+#define OVERLAY_IDX_SHIFT		0
+#define OVERLAY_FLAGS_MASK		0xffffff00
+#define OVERLAY_FLAGS_SHIFT		8
+
+#define OVERLAY_FLAG_POSTLOAD	0x100
+
+#define OVERLAY_FLAG_DEFER_DL	0x200
+
+#define OVERLAY_FLAG_PRESLEEP	0x400
+
+#define OVERLAY_DOWNLOAD_CHUNKSIZE	1024
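+
+/* Editor's illustrative sketch -- not part of the original Broadcom sources:
+ * how the packed flags_idx word in wl_ioctl_overlay_t decomposes using the
+ * OVERLAY_* masks and shifts defined above.
+ */
+static inline uint32 wl_overlay_idx_sketch(uint32 flags_idx)
+{
+	return (flags_idx & OVERLAY_IDX_MASK) >> OVERLAY_IDX_SHIFT;
+}
+
+static inline uint32 wl_overlay_flags_sketch(uint32 flags_idx)
+{
+	return (flags_idx & OVERLAY_FLAGS_MASK) >> OVERLAY_FLAGS_SHIFT;
+}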
+#endif 
+
+
+#include <packed_section_end.h>
+
+
+#include <packed_section_start.h>
+
+#define VNDR_IE_CMD_LEN     4   
+
+
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define VNDR_IE_ASSOCRSP_FLAG   0x4
+#define VNDR_IE_AUTHRSP_FLAG    0x8
+#define VNDR_IE_PRBREQ_FLAG 0x10
+#define VNDR_IE_ASSOCREQ_FLAG   0x20
+#define VNDR_IE_CUSTOM_FLAG 0x100 
+
+#define VNDR_IE_INFO_HDR_LEN    (sizeof(uint32))
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;         
+	vndr_ie_t vndr_ie_data;     
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int iecount;            
+	vndr_ie_info_t vndr_ie_list[1]; 
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];  
+	vndr_ie_buf_t vndr_ie_buffer;   
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+	struct ether_addr staAddr;
+	uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+	sta_prbreq_wps_ie_hdr_t hdr;
+	uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+	uint32 totLen;
+	uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#ifdef WLMEDIA_TXFAILEVENT
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char   dest[ETHER_ADDR_LEN]; 
+	uint8  prio;            
+	uint8  flags;           
+	uint32 tsf_l;           
+	uint32 tsf_h;           
+	uint16 rates;           
+	uint16 txstatus;        
+} BWL_POST_PACKED_STRUCT txfailinfo_t;
+#endif 
+
+#include <packed_section_end.h>
+
+
+#define ASSERTLOG_CUR_VER   0x0100
+#define MAX_ASSRTSTR_LEN    64
+
+typedef struct assert_record {
+	uint32 time;
+	uint8 seq_num;
+	char str[MAX_ASSRTSTR_LEN];
+} assert_record_t;
+
+typedef struct assertlog_results {
+	uint16 version;
+	uint16 record_len;
+	uint32 num;
+	assert_record_t logs[1];
+} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN  8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
+
+
+
+
+
+#define CHANIM_DISABLE  0   
+#define CHANIM_DETECT   1   
+#define CHANIM_ACT  2   
+#define CHANIM_MODE_MAX 2
+
+
+#define APCS_IOCTL      1
+#define APCS_CHANIM     2
+#define APCS_CSTIMER    3
+#define APCS_BTA        4
+
+
+#define CHANIM_ACS_RECORD           10
+
+
+typedef struct {
+	bool valid;
+	uint8 trigger;
+	chanspec_t selected_chspc;
+	uint32 glitch_cnt;
+	uint8 ccastats;
+	uint timestamp;
+} chanim_acs_record_t;
+
+typedef struct {
+	chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+	uint8 count;
+	uint timestamp;
+} wl_acs_record_t;
+
+
+
+#define SMFS_VERSION 1
+
+typedef struct wl_smfs_elem {
+	uint32 count;
+	uint16 code;  
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+	uint32 version;
+	uint16 length;  
+	uint8 type;
+	uint8 codetype;
+	uint32 ignored_cnt;
+	uint32 malformed_cnt;
+	uint32 count_total; 
+	wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
+
+enum {
+	SMFS_CODETYPE_SC,
+	SMFS_CODETYPE_RC
+};
+
+
+#define SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED   0xFFFD
+
+typedef enum smfs_type {
+	SMFS_TYPE_AUTH,
+	SMFS_TYPE_ASSOC,
+	SMFS_TYPE_REASSOC,
+	SMFS_TYPE_DISASSOC_TX,
+	SMFS_TYPE_DISASSOC_RX,
+	SMFS_TYPE_DEAUTH_TX,
+	SMFS_TYPE_DEAUTH_RX,
+	SMFS_TYPE_MAX
+} smfs_type_t;
+
+#ifdef PHYMON
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+	
+	int16 tx_iqlocal_a;
+	int16 tx_iqlocal_b;
+	int8 tx_iqlocal_ci;
+	int8 tx_iqlocal_cq;
+	int8 tx_iqlocal_di;
+	int8 tx_iqlocal_dq;
+	int8 tx_iqlocal_ei;
+	int8 tx_iqlocal_eq;
+	int8 tx_iqlocal_fi;
+	int8 tx_iqlocal_fq;
+
+	
+	int16 rx_iqcal_a;
+	int16 rx_iqcal_b;
+
+	uint8 tx_iqlocal_pwridx; 
+	uint32 papd_epsilon_table[64]; 
+	int16 papd_epsilon_offset; 
+	uint8 curr_tx_pwrindex; 
+	int8 idle_tssi; 
+	int8 est_tx_pwr; 
+	int8 est_rx_pwr; 
+	uint16 rx_gaininfo; 
+	uint16 init_gaincode; 
+	int8 estirr_tx;
+	int8 estirr_rx;
+
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+	int version;
+	int8 num_phy_cores; 
+	int8 curr_temperature; 
+	chanspec_t chspec; 
+	bool aci_state; 
+	uint16 crsminpower; 
+	uint16 crsminpowerl; 
+	uint16 crsminpoweru; 
+	wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+#endif 
+
+#ifdef WLP2P
+
+typedef struct wl_p2p_disc_st {
+	uint8 state;    
+	chanspec_t chspec;  
+	uint16 dwell;   
+} wl_p2p_disc_st_t;
+
+
+#define WL_P2P_DISC_ST_SCAN 0
+#define WL_P2P_DISC_ST_LISTEN   1
+#define WL_P2P_DISC_ST_SEARCH   2
+
+
+typedef struct wl_p2p_scan {
+	uint8 type;     
+	uint8 reserved[3];
+	
+} wl_p2p_scan_t;
+
+
+typedef struct wl_p2p_if {
+	struct ether_addr addr;
+	uint8 type; 
+	chanspec_t chspec;  
+} wl_p2p_if_t;
+
+
+#define WL_P2P_IF_CLIENT    0
+#define WL_P2P_IF_GO        1
+#define WL_P2P_IF_DYNBCN_GO     2
+#define WL_P2P_IF_DEV       3
+
+
+typedef struct wl_p2p_ifq {
+	uint bsscfgidx;
+	char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+
+typedef struct wl_p2p_ops {
+	uint8 ops;  
+	uint8 ctw;  
+} wl_p2p_ops_t;
+
+
+typedef struct wl_p2p_sched_desc {
+	uint32 start;
+	uint32 interval;
+	uint32 duration;
+	uint32 count;   
+} wl_p2p_sched_desc_t;
+
+
+#define WL_P2P_SCHED_RSVD   0
+#define WL_P2P_SCHED_REPEAT 255 
+
+typedef struct wl_p2p_sched {
+	uint8 type; 
+	uint8 action;   
+	uint8 option;   
+	wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+#define WL_P2P_SCHED_FIXED_LEN      3
+
+
+#define WL_P2P_SCHED_TYPE_ABS       0   
+#define WL_P2P_SCHED_TYPE_REQ_ABS   1   
+
+
+#define WL_P2P_SCHED_ACTION_NONE    0   
+#define WL_P2P_SCHED_ACTION_DOZE    1   
+
+#define WL_P2P_SCHED_ACTION_GOOFF   2   
+
+#define WL_P2P_SCHED_ACTION_RESET   255 
+
+
+#define WL_P2P_SCHED_OPTION_NORMAL  0   
+#define WL_P2P_SCHED_OPTION_BCNPCT  1   
+
+#define WL_P2P_SCHED_OPTION_TSFOFS  2   
+
+
+#define WL_P2P_FEAT_GO_CSA      (1 << 0)        
+#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1)        
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2)  
+#endif 
+
+
+#define BCM_ACTION_RFAWARE      0x77
+#define BCM_ACTION_RFAWARE_DCS  0x01
+
+
+
+#define WL_11N_2x2          1
+#define WL_11N_3x3          3
+#define WL_11N_4x4          4
+
+
+#define WLFEATURE_DISABLE_11N       0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX   0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX   0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX    0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX    0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX  0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX  0x00000040
+#define WLFEATURE_DISABLE_11N_GF    0x00000080
+
+
+#define LQ_IDX_LAST             3
+#define MCS_INDEX_SIZE          33
+
+#define LQ_IDX_MIN              0
+#define LQ_IDX_MAX              1
+#define LQ_IDX_AVG              2
+#define LQ_IDX_SUM              2
+#define LQ_IDX_LAST             3
+#define LQ_STOP_MONITOR         0
+#define LQ_START_MONITOR        1
+
+#define LINKQUAL_V1     0x01
+
+struct  wl_lq {
+	int32 enable;
+	int32 rssi[LQ_IDX_LAST];    
+	int32 rssicnt;
+	int32 snr[LQ_IDX_LAST];     
+	uint32 nsamples;            
+	uint8 isvalid;              
+	uint8 version;
+}; 
+
+typedef struct wl_lq wl_lq_t;
+typedef struct wl_lq  wl_lq_stats_t;
+
+typedef struct {
+	struct  ether_addr ea;  
+	uint8   ac_cat; 
+	uint8   num_pkts;   
+} wl_mac_ratehisto_cmd_t;   
+
+
+typedef struct {
+	uint32  rate[WLC_MAXRATE + 1];  
+	uint32  mcs_index[MCS_INDEX_SIZE];  
+	uint32  tsf_timer[2][2];    
+} wl_mac_ratehisto_res_t;   
+
+#ifdef PROP_TXSTATUS
+
+
+#define WLFC_FLAGS_RSSI_SIGNALS                         1
+
+
+#define WLFC_FLAGS_XONXOFF_SIGNALS                      2
+
+
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS        4
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE     8
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE     16
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE	32
+#endif 
+
+#define BTA_STATE_LOG_SZ    64
+
+
+enum {
+	HCIReset = 1,
+	HCIReadLocalAMPInfo,
+	HCIReadLocalAMPASSOC,
+	HCIWriteRemoteAMPASSOC,
+	HCICreatePhysicalLink,
+	HCIAcceptPhysicalLinkRequest,
+	HCIDisconnectPhysicalLink,
+	HCICreateLogicalLink,
+	HCIAcceptLogicalLink,
+	HCIDisconnectLogicalLink,
+	HCILogicalLinkCancel,
+	HCIAmpStateChange,
+	HCIWriteLogicalLinkAcceptTimeout
+};
+
+typedef struct flush_txfifo {
+	uint32 txfifobmp;
+	uint32 hwtxfifoflush;
+	struct ether_addr ea;
+} flush_txfifo_t;
+
+#define CHANNEL_5G_LOW_START    36      
+#define CHANNEL_5G_MID_START    52      
+#define CHANNEL_5G_HIGH_START   100     
+#define CHANNEL_5G_UPPER_START  149     
+
+enum {
+	SPATIAL_MODE_2G_IDX = 0,
+	SPATIAL_MODE_5G_LOW_IDX,
+	SPATIAL_MODE_5G_MID_IDX,
+	SPATIAL_MODE_5G_HIGH_IDX,
+	SPATIAL_MODE_5G_UPPER_IDX,
+	SPATIAL_MODE_MAX_IDX
+};
+
+#endif 
diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c
new file mode 100644
index 0000000..bbb2408
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -0,0 +1,912 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c,v 1.168.2.7 2011-01-27 17:01:13 Exp $
+ */
+
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+
+#ifdef BCMASSERT_LOG
+#include <bcm_assert_log.h>
+#endif
+
+#include <linux/fs.h>
+
+#define PCI_CFG_RETRY 		10
+
+#define OS_HANDLE_MAGIC		0x1234abcd	
+#define BCM_MEM_FILENAME_LEN 	24		
+
+#ifdef DHD_USE_STATIC_BUF
+#define MAX_STATIC_BUF_NUM 16
+#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE)
+typedef struct bcm_static_buf {
+	struct semaphore static_sem;
+	unsigned char *buf_ptr;
+	unsigned char buf_use[MAX_STATIC_BUF_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define MAX_STATIC_PKT_NUM 8
+typedef struct bcm_static_pkt {
+	struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM];
+	struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM];
+	struct semaphore osl_pkt_sem;
+	unsigned char pkt_use[MAX_STATIC_PKT_NUM*2];
+} bcm_static_pkt_t;
+static bcm_static_pkt_t *bcm_static_skb = 0;
+#endif 
+
+typedef struct bcm_mem_link {
+	struct bcm_mem_link *prev;
+	struct bcm_mem_link *next;
+	uint	size;
+	int	line;
+	char	file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_info {
+	osl_pubinfo_t pub;
+#ifdef CTFPOOL
+	ctfpool_t *ctfpool;
+#endif 
+	uint magic;
+	void *pdev;
+	atomic_t malloced;
+	uint failed;
+	uint bustype;
+	bcm_mem_link_t *dbgmem_list;
+};
+
+
+
+
+uint32 g_assert_type = FALSE;
+
+static int16 linuxbcmerrormap[] =
+{	0, 			
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL,		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-E2BIG,			
+	-E2BIG,			
+	-EBUSY, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EFAULT, 		
+	-ENOMEM, 		
+	-EOPNOTSUPP,		
+	-EMSGSIZE,		
+	-EINVAL,		
+	-EPERM,			
+	-ENOMEM, 		
+	-EINVAL, 		
+	-ERANGE, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL, 		
+	-EINVAL,		
+	-EIO,			
+	-ENODEV,		
+	-EINVAL,		
+	-EIO,			
+	-EIO,			
+	-ENODEV,		
+	-EINVAL,		
+	-ENODATA,		
+
+
+
+#if BCME_LAST != -42
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+	for new error code defined in bcmutils.h"
+#endif
+};
+
+
+int
+osl_error(int bcmerror)
+{
+	if (bcmerror > 0)
+		bcmerror = 0;
+	else if (bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+
+	
+	return linuxbcmerrormap[-bcmerror];
+}
+
+extern uint8* dhd_os_prealloc(void *osh, int section, int size);
+
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+	osl_t *osh;
+
+	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+	ASSERT(osh);
+	if (osh == NULL)
+		return NULL;
+
+	bzero(osh, sizeof(osl_t));
+
+	
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+	osh->magic = OS_HANDLE_MAGIC;
+	atomic_set(&osh->malloced, 0);
+	osh->failed = 0;
+	osh->dbgmem_list = NULL;
+	osh->pdev = pdev;
+	osh->pub.pkttag = pkttag;
+	osh->bustype = bustype;
+
+	switch (bustype) {
+		case PCI_BUS:
+		case SI_BUS:
+		case PCMCIA_BUS:
+			osh->pub.mmbus = TRUE;
+			break;
+		case JTAG_BUS:
+		case SDIO_BUS:
+		case USB_BUS:
+		case SPI_BUS:
+		case RPC_BUS:
+			osh->pub.mmbus = FALSE;
+			break;
+		default:
+			ASSERT(FALSE);
+			break;
+	}
+
+#if defined(DHD_USE_STATIC_BUF)
+	if (!bcm_static_buf) {
+		bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3,
+			STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN);
+		if (!bcm_static_buf) {
+			printk("cannot alloc static buf!\n");
+		} else {
+			printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+
+			/* only touch the preallocated region if it was actually obtained */
+			sema_init(&bcm_static_buf->static_sem, 1);
+			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+		}
+	}
+
+	if (!bcm_static_skb && bcm_static_buf) {
+		int i;
+		void *skb_buff_ptr = 0;
+
+		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+		skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
+
+		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * 16);
+		for (i = 0; i < MAX_STATIC_PKT_NUM * 2; i++)
+			bcm_static_skb->pkt_use[i] = 0;
+
+		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+	}
+#endif 
+
+	return osh;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+	if (osh == NULL)
+		return;
+
+	ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	kfree(osh);
+}
+
+struct sk_buff *osl_alloc_skb(unsigned int len)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+	gfp_t flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+
+	return __dev_alloc_skb(len, flags);
+#else
+	return dev_alloc_skb(len);
+#endif
+}
+
+#ifdef CTFPOOL
+
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+	struct sk_buff *skb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return NULL;
+
+	spin_lock_bh(&osh->ctfpool->lock);
+	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+	
+	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+		spin_unlock_bh(&osh->ctfpool->lock);
+		return NULL;
+	}
+
+	
+	skb = osl_alloc_skb(osh->ctfpool->obj_size);
+	if (skb == NULL) {
+		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+		       osh->ctfpool->obj_size);
+		spin_unlock_bh(&osh->ctfpool->lock);
+		return NULL;
+	}
+
+	
+	skb->next = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = skb;
+	osh->ctfpool->fast_frees++;
+	osh->ctfpool->curr_obj++;
+
+	
+	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+	
+	PKTFAST(osh, skb) = FASTBUF;
+
+	spin_unlock_bh(&osh->ctfpool->lock);
+
+	return skb;
+}
+
+
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	
+	while ((osh->ctfpool->refills > 0) && (thresh--)) {
+		osl_ctfpool_add(osh);
+		osh->ctfpool->refills--;
+	}
+}
+
+
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+	osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
+	ASSERT(osh->ctfpool);
+	bzero(osh->ctfpool, sizeof(ctfpool_t));
+
+	osh->ctfpool->max_obj = numobj;
+	osh->ctfpool->obj_size = size;
+
+	spin_lock_init(&osh->ctfpool->lock);
+
+	while (numobj--) {
+		if (!osl_ctfpool_add(osh))
+			return -1;
+		osh->ctfpool->fast_frees--;
+	}
+
+	return 0;
+}
+
+
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+	struct sk_buff *skb, *nskb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	spin_lock_bh(&osh->ctfpool->lock);
+
+	skb = osh->ctfpool->head;
+
+	while (skb != NULL) {
+		nskb = skb->next;
+		dev_kfree_skb(skb);
+		skb = nskb;
+		osh->ctfpool->curr_obj--;
+	}
+
+	ASSERT(osh->ctfpool->curr_obj == 0);
+	osh->ctfpool->head = NULL;
+	spin_unlock_bh(&osh->ctfpool->lock);
+
+	kfree(osh->ctfpool);
+	osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+	struct bcmstrbuf *bb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+#ifdef DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif 
+
+	bb = b;
+
+	ASSERT((osh != NULL) && (bb != NULL));
+
+	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
+	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+	            osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+
+	
+	if (osh->ctfpool == NULL)
+		return NULL;
+
+	spin_lock_bh(&osh->ctfpool->lock);
+	if (osh->ctfpool->head == NULL) {
+		ASSERT(osh->ctfpool->curr_obj == 0);
+		osh->ctfpool->slow_allocs++;
+		spin_unlock_bh(&osh->ctfpool->lock);
+		return NULL;
+	}
+
+	ASSERT(len <= osh->ctfpool->obj_size);
+
+	
+	skb = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = (void *)skb->next;
+
+	osh->ctfpool->fast_allocs++;
+	osh->ctfpool->curr_obj--;
+	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+	spin_unlock_bh(&osh->ctfpool->lock);
+
+	
+	skb->next = skb->prev = NULL;
+	skb->data = skb->head + 16;
+	skb->tail = skb->head + 16;
+
+	skb->len = 0;
+	skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+	skb->list = NULL;
+#endif
+	atomic_set(&skb->users, 1);
+
+	return skb;
+}
+#endif
+
+
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+
+#ifdef CTFPOOL
+	skb = osl_pktfastget(osh, len);
+	if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) {
+#else
+	if ((skb = osl_alloc_skb(len))) {
+#endif
+		skb_put(skb, len);
+		skb->priority = 0;
+
+		osh->pub.pktalloced++;
+	}
+
+	return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+	ctfpool_t *ctfpool;
+
+	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+	ASSERT(ctfpool != NULL);
+
+	
+	spin_lock_bh(&ctfpool->lock);
+	skb->next = (struct sk_buff *)ctfpool->head;
+	ctfpool->head = (void *)skb;
+
+	ctfpool->fast_frees++;
+	ctfpool->curr_obj++;
+
+	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+	spin_unlock_bh(&ctfpool->lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+	skb->tstamp.tv.sec = 0;
+#else
+	skb->stamp.tv_sec = 0;
+#endif
+
+	
+	skb->dev = NULL;
+	skb->dst = NULL;
+	memset(skb->cb, 0, sizeof(skb->cb));
+	skb->ip_summed = 0;
+	skb->destructor = NULL;
+}
+#endif 
+
+
+void BCMFASTPATH
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+	struct sk_buff *skb, *nskb;
+
+	skb = (struct sk_buff*) p;
+
+	if (send && osh->pub.tx_fn)
+		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+	
+	while (skb) {
+		nskb = skb->next;
+		skb->next = NULL;
+
+
+#ifdef CTFPOOL
+		if (PKTISFAST(osh, skb))
+			osl_pktfastfree(osh, skb);
+		else {
+#else 
+		{
+#endif 
+
+			if (skb->destructor)
+				
+				dev_kfree_skb_any(skb);
+			else
+				
+				dev_kfree_skb(skb);
+		}
+
+		osh->pub.pktalloced--;
+
+		skb = nskb;
+	}
+}
+
+#ifdef DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+	int i = 0;
+	struct sk_buff *skb;
+
+
+	if (len > (PAGE_SIZE*2))
+	{
+		printk("Do we really need this big skb??\n");
+		return osl_pktget(osh, len);
+	}
+
+
+	down(&bcm_static_skb->osl_pkt_sem);
+	if (len <= PAGE_SIZE)
+	{
+
+		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+		{
+			if (bcm_static_skb->pkt_use[i] == 0)
+				break;
+		}
+
+		if (i != MAX_STATIC_PKT_NUM)
+		{
+			bcm_static_skb->pkt_use[i] = 1;
+			up(&bcm_static_skb->osl_pkt_sem);
+
+			skb = bcm_static_skb->skb_4k[i];
+			skb->tail = skb->data + len;
+			skb->len = len;
+
+			return skb;
+		}
+	}
+
+
+	for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+	{
+		if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0)
+			break;
+	}
+
+	if (i != MAX_STATIC_PKT_NUM)
+	{
+		bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1;
+		up(&bcm_static_skb->osl_pkt_sem);
+		skb = bcm_static_skb->skb_8k[i];
+		skb->tail = skb->data + len;
+		skb->len = len;
+
+		return skb;
+	}
+
+
+	up(&bcm_static_skb->osl_pkt_sem);
+	printk("all static pkt in use!\n");
+	return osl_pktget(osh, len);
+}
+
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+	int i;
+
+	for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+	{
+		if (p == bcm_static_skb->skb_4k[i])
+		{
+			down(&bcm_static_skb->osl_pkt_sem);
+			bcm_static_skb->pkt_use[i] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+
+			return;
+		}
+	}
+
+	for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
+	{
+		if (p == bcm_static_skb->skb_8k[i])
+		{
+			down(&bcm_static_skb->osl_pkt_sem);
+			bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+
+			return;
+		}
+	}
+
+	return osl_pktfree(osh, p, send);
+}
+#endif 
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+	uint val = 0;
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	
+	ASSERT(size == 4);
+
+	do {
+		pci_read_config_dword(osh->pdev, offset, &val);
+		if (val != 0xffffffff)
+			break;
+	} while (retry--);
+
+
+	return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	
+	ASSERT(size == 4);
+
+	do {
+		pci_write_config_dword(osh->pdev, offset, val);
+		if (offset != PCI_BAR0_WIN)
+			break;
+		if (osl_pci_read_config(osh, offset, size) == val)
+			break;
+	} while (retry--);
+
+}
+
+
+uint
+osl_pci_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+
+uint
+osl_pci_slot(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+	void *addr;
+
+	
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh)
+		atomic_add(size, &osh->malloced);
+
+	return (addr);
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+	if (osh) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+		atomic_sub(size, &osh->malloced);
+	}
+	kfree(addr);
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (atomic_read(&osh->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (osh->failed);
+}
+
+
+
+uint
+osl_dma_consistent_align(void)
+{
+	return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
+{
+	uint16 align = (1 << align_bits);
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+		size += align;
+	*alloced = size;
+
+	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
+}
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+}
+
+uint BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(char *exp, char *file, int line)
+{
+	char tempbuf[256];
+	char *basename;
+
+	basename = strrchr(file, '/');
+	
+	if (basename)
+		basename++;
+
+	if (!basename)
+		basename = file;
+
+#ifdef BCMASSERT_LOG
+	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
+		exp, basename, line);
+
+	bcm_assert_log(tempbuf);
+#endif 
+
+
+}
+#endif 
+
+void
+osl_delay(uint usec)
+{
+	uint d;
+
+	while (usec > 0) {
+		d = MIN(usec, 1000);
+		udelay(d);
+		usec -= d;
+	}
+}
+
+
+
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+	void * p;
+
+	if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)
+		return NULL;
+
+#ifdef CTFPOOL
+	if (PKTISFAST(osh, skb)) {
+		ctfpool_t *ctfpool;
+
+		
+		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+		ASSERT(ctfpool != NULL);
+		PKTCLRFAST(osh, p);
+		PKTCLRFAST(osh, skb);
+		ctfpool->refills++;
+	}
+#endif 
+
+	
+	if (osh->pub.pkttag)
+		bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+	
+	osh->pub.pktalloced++;
+	return (p);
+}
+
+
+
+
+
+
+
+void *
+osl_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c
new file mode 100644
index 0000000..02d1bc0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/sbutils.c
@@ -0,0 +1,992 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c,v 1.687.2.1 2010-11-29 20:21:56 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+                     uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+
+#define	SET_SBREG(sii, r, mask, val)	\
+		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
+#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
+#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+	uint8 tmp;
+	uint32 val, intr_val = 0;
+
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	val = R_REG(sii->osh, sbr);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+	uint8 tmp;
+	volatile uint32 dummy;
+	uint32 intr_val = 0;
+
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+		dummy = R_REG(sii->osh, sbr);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+	} else
+		W_REG(sii->osh, sbr, v);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+	si_info_t *sii;
+	void *corereg;
+	sbconfig_t *sb;
+	uint origidx, intflag, intr_val = 0;
+
+	sii = SI_INFO(sih);
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+	corereg = si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(corereg != NULL);
+	sb = REGS2SB(corereg);
+	intflag = R_SBREG(sii, &sb->sbflagst);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+
+	return intflag;
+}
+
+uint
+sb_flag(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 vec;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	if (siflag == -1)
+		vec = 0;
+	else
+		vec = 1 << siflag;
+	W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+	uint i;
+
+	for (i = 0; i < sii->numcores; i ++)
+		if (sba == sii->coresba[i])
+			return i;
+	return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+	uint32 sbaddr;
+
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS: {
+		sbconfig_t *sb = REGS2SB(sii->curmap);
+		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+		break;
+	}
+
+	case PCI_BUS:
+		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = 0;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		sbaddr  = (uint32)tmp << 12;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		sbaddr |= (uint32)tmp << 16;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		sbaddr |= (uint32)tmp << 24;
+		break;
+	}
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		sbaddr = (uint32)(uintptr)sii->curmap;
+		break;
+
+
+	default:
+		sbaddr = BADCOREADDR;
+		break;
+	}
+
+	return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint sbidh;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+	sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+	return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+	        (val << SBTML_SICF_SHIFT);
+	W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+		        (val << SBTML_SICF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatelow, w);
+	}
+
+	/* Return the new value.
+	 * For a write operation, the following readback ensures completion of the write.
+	 */
+	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+		        (val << SBTMH_SISF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatehigh, w);
+	}
+
+	/* return the new value */
+	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbtmstatelow) &
+	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if it does not exist */
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and, on newer cores, chipc regs */
+
+		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		if (regoff >= SBCONFIGOFF) {
+			w = (R_SBREG(sii, r) & ~mask) | val;
+			W_SBREG(sii, r, w);
+		} else {
+			w = (R_REG(sii->osh, r) & ~mask) | val;
+			W_REG(sii->osh, r, w);
+		}
+	}
+
+	/* readback */
+	if (regoff >= SBCONFIGOFF)
+		w = R_SBREG(sii, r);
+	else {
+		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+		    (coreidx == SI_CC_IDX) &&
+		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+			w = val;
+		} else
+			w = R_REG(sii->osh, r);
+	}
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			sb_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
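+
+/*
+ * Illustrative sketch (not compiled in): sb_corereg() above performs a
+ * read-modify-write, w = (old & ~mask) | val, and returns the post-operation
+ * readback, so passing mask == 0 and val == 0 simply reads the register.
+ * The helper name below is hypothetical.
+ */
+#if 0
+static uint example_read_cc_chipid(si_t *sih)
+{
+	/* read-only access: no bits change when both mask and val are 0 */
+	return sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
+}
+#endif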
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES	2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+	uint next;
+	uint ncc = 0;
+	uint i;
+
+	if (bus >= SB_MAXBUSES) {
+		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+		return 0;
+	}
+	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+	/* Scan all cores on the bus starting from core 0.
+	 * Core addresses must be contiguous on each bus.
+	 */
+	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+		/* keep and reuse the initial register mapping */
+		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
+			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+			sii->regs[next] = regs;
+		}
+
+		/* change core to 'next' and read its coreid */
+		sii->curmap = _sb_setcoreidx(sii, next);
+		sii->curidx = next;
+
+		sii->coreid[next] = sb_coreid(&sii->pub);
+
+		/* core specific processing... */
+		/* chipc provides # cores */
+		if (sii->coreid[next] == CC_CORE_ID) {
+			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+			uint32 ccrev = sb_corerev(&sii->pub);
+
+			/* determine numcores - this is the total # cores in the chip */
+			if (((ccrev == 4) || (ccrev >= 6)))
+				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+				        CID_CC_SHIFT;
+			else {
+				/* Older chips */
+				uint chip = CHIPID(sii->pub.chip);
+
+				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
+					numcores = 6;
+				else if (chip == BCM4704_CHIP_ID)
+					numcores = 9;
+				else if (chip == BCM5365_CHIP_ID)
+					numcores = 7;
+				else {
+					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+					          chip));
+					ASSERT(0);
+					numcores = 1;
+				}
+			}
+			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+				sii->pub.issim ? "QT" : ""));
+		}
+		/* scan bridged SB(s) and add results to the end of the list */
+		else if (sii->coreid[next] == OCP_CORE_ID) {
+			sbconfig_t *sb = REGS2SB(sii->curmap);
+			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+			uint nsbcc;
+
+			sii->numcores = next + 1;
+
+			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+				continue;
+			nsbba &= 0xfffff000;
+			if (_sb_coreidx(sii, nsbba) != BADIDX)
+				continue;
+
+			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+			if (sbba == SI_ENUM_BASE)
+				numcores -= nsbcc;
+			ncc += nsbcc;
+		}
+	}
+
+	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+	sii->numcores = i + ncc;
+	return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii;
+	uint32 origsba;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+	/* Save the current core info and validate it later until we know
+	 * for sure what is good and what is bad.
+	 */
+	origsba = _sb_coresba(sii);
+
+	/* scan all SB(s) starting from SI_ENUM_BASE */
+	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	sii->curmap = _sb_setcoreidx(sii, coreidx);
+	sii->curidx = coreidx;
+
+	return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+	uint32 sbaddr = sii->coresba[coreidx];
+	void *regs;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		regs = sii->regs[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+		regs = sii->curmap;
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = (sbaddr >> 12) & 0x0f;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		tmp = (sbaddr >> 16) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		tmp = (sbaddr >> 24) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		regs = sii->curmap;
+		break;
+	}
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* map new one */
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = (void *)(uintptr)sbaddr;
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		regs = sii->regs[coreidx];
+		break;
+
+
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+	sbconfig_t *sb;
+	volatile uint32 *addrm;
+
+	sb = REGS2SB(sii->curmap);
+
+	switch (asidx) {
+	case 0:
+		addrm =  &sb->sbadmatch0;
+		break;
+
+	case 1:
+		addrm =  &sb->sbadmatch1;
+		break;
+
+	case 2:
+		addrm =  &sb->sbadmatch2;
+		break;
+
+	case 3:
+		addrm =  &sb->sbadmatch3;
+		break;
+
+	default:
+		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+		return 0;
+	}
+
+	return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	/* + 1 because of enumeration space */
+	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+
+	sii = SI_INFO(sih);
+
+	origidx = sii->curidx;
+	ASSERT(GOODIDX(origidx));
+
+	INTR_OFF(sii, intr_val);
+
+	/* switch over to chipcommon core if there is one, else use pci */
+	if (sii->pub.ccrev != NOREV) {
+		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		ASSERT(ccregs != NULL);
+
+		/* do the buffer registers update */
+		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+	} else
+		ASSERT(0);
+
+	/* restore core index */
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/* if core is already in reset, just return */
+	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+		return;
+
+	/* if clocks are not enabled, put into reset and return */
+	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+		goto disable;
+
+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
+	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+		dummy = R_SBREG(sii, &sb->sbimstate);
+		OSL_DELAY(1);
+		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+	}
+
+	/* set reset and reject while enabling the clocks */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_REJ | SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(10);
+
+	/* don't forget to clear the initiator reject bit */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+	/* leave reset and reject asserted */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	sb_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+
+	/* set reset while enabling the clock and forcing them on throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+		W_SBREG(sii, &sb->sbtmstatehigh, 0);
+	}
+	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+
+	/* leave clock enabled */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	OSL_DELAY(1);
+}
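+
+/*
+ * Usage note (from the comment above sb_core_reset()): 'bits' remain set in
+ * sbtmstatelow after the sequence completes, while 'resetbits' are applied
+ * only while reset is asserted.  Passing 0 for both performs a plain
+ * reset/re-enable with just the clock forced on.
+ */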
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ *	SI_BUS => mips
+ *	JTAG_BUS => chipc
+ *	PCI_BUS => pci or pcie
+ *	PCMCIA_BUS => pcmcia
+ *	SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 tmp, ret = 0xffffffff;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	if ((to & ~TO_MASK) != 0)
+		return ret;
+
+	/* Figure out the master core */
+	if (idx == BADIDX) {
+		switch (BUSTYPE(sii->pub.bustype)) {
+		case PCI_BUS:
+			idx = sii->pub.buscoreidx;
+			break;
+		case JTAG_BUS:
+			idx = SI_CC_IDX;
+			break;
+		case PCMCIA_BUS:
+		case SDIO_BUS:
+			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+			break;
+		case SI_BUS:
+			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+			break;
+		default:
+			ASSERT(0);
+		}
+		if (idx == BADIDX)
+			return ret;
+	}
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+	tmp = R_SBREG(sii, &sb->sbimconfiglow);
+	ret = tmp & TO_MASK;
+	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+	sb_commit(sih);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret;
+}
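+
+/*
+ * Illustrative sketch (not compiled in): per the comment above, a caller can
+ * clear the initiator timeouts around a slow access (e.g. OTP) and restore the
+ * previous setting afterwards.  Passing BADIDX lets the routine pick the
+ * master core from the bus type; the helper name is hypothetical.
+ */
+#if 0
+static void example_slow_access(si_t *sih)
+{
+	uint32 prev_to;
+
+	prev_to = sb_set_initiator_to(sih, 0, BADIDX);	/* clear RTO/STO, save old value */
+	if (prev_to != 0xffffffff) {
+		/* ... perform the slow access here ... */
+		(void)sb_set_initiator_to(sih, prev_to, BADIDX);	/* restore */
+	}
+}
+#endif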
+
+uint32
+sb_base(uint32 admatch)
+{
+	uint32 base;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	base = 0;
+
+	if (type == 0) {
+		base = admatch & SBAM_BASE0_MASK;
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE1_MASK;
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE2_MASK;
+	}
+
+	return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+	uint32 size;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	size = 0;
+
+	if (type == 0) {
+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+	}
+
+	return (size);
+}
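+
+/*
+ * Worked example (assumed field value, for illustration only): for a type 0
+ * admatch word whose ADINT0 field reads back as 11, sb_size() returns
+ * 1 << (11 + 1) = 4096 bytes, i.e. one 4KB core window, while sb_base()
+ * returns the admatch word with everything but the BASE0 bits masked off.
+ */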
diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c
new file mode 100644
index 0000000..22aa412
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils.c
@@ -0,0 +1,1720 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c,v 1.813.2.36 2011-02-10 23:43:55 Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <hndpmu.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                              uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs);
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a pointer area for "environment" variables
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	si_info_t *sii;
+
+	/* alloc si_info_t */
+	if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		return (NULL);
+	}
+
+	if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+		MFREE(osh, sii, sizeof(si_info_t));
+		return (NULL);
+	}
+	sii->vars = vars ? *vars : NULL;
+	sii->varsz = varsz ? *varsz : 0;
+
+	return (si_t *)sii;
+}
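+
+/*
+ * Illustrative sketch (not compiled in): the usual attach/detach pairing.  The
+ * devid, osh, regs, sdh, vars and varsz values are placeholders supplied by a
+ * bus driver; the helper name is hypothetical.
+ */
+#if 0
+static void example_attach_detach(uint devid, osl_t *osh, void *regs, void *sdh,
+                                  char **vars, uint *varsz)
+{
+	si_t *sih = si_attach(devid, osh, regs, SDIO_BUS, sdh, vars, varsz);
+
+	if (sih == NULL)
+		return;		/* allocation or core scan failed */
+
+	/* ... use the handle: si_coreid(sih), si_corereg(sih, ...), etc. ... */
+
+	si_detach(sih);		/* unmaps SI_BUS register windows and frees the handle */
+}
+#endif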
+
+/* global kernel resource */
+static si_info_t ksii;
+
+static uint32	wd_msticks;		/* watchdog timer ticks normalized to ms */
+
+/* generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+	static bool ksii_attached = FALSE;
+
+	if (!ksii_attached) {
+		void *regs;
+		regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+
+		if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+		                SI_BUS, NULL,
+		                osh != SI_OSH ? &ksii.vars : NULL,
+		                osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
+			SI_ERROR(("si_kattach: si_doattach failed\n"));
+			REG_UNMAP(regs);
+			return NULL;
+		}
+		REG_UNMAP(regs);
+
+		/* save ticks normalized to ms for si_watchdog_ms() */
+		if (PMUCTL_ENAB(&ksii.pub)) {
+			/* based on 32KHz ILP clock */
+			wd_msticks = 32;
+		} else {
+			wd_msticks = ALP_CLOCK / 1000;
+		}
+
+		ksii_attached = TRUE;
+		SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+		        ksii.pub.ccrev, wd_msticks));
+	}
+
+	return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	/* need to set the memseg flag for CF cards before any sb register access */
+	if (BUSTYPE(bustype) == PCMCIA_BUS)
+		sii->memseg = TRUE;
+
+
+	if (BUSTYPE(bustype) == SDIO_BUS) {
+		int err;
+		uint8 clkset;
+
+		/* Try forcing SDIO core to do ALPAvail request only */
+		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+		if (!err) {
+			uint8 clkval;
+
+			/* If register supported, wait for ALPAvail and then force ALP */
+			clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+			if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+				SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+					PMU_MAX_TRANSITION_DLY);
+				if (!SBSDIO_ALPAV(clkval)) {
+					SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+						clkval));
+					return FALSE;
+				}
+				clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					clkset, &err);
+				OSL_DELAY(65);
+			}
+		}
+
+		/* Also, disable the extra SDIO pull-ups */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	}
+
+
+	return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs)
+{
+	bool pci, pcie;
+	uint i;
+	uint pciidx, pcieidx, pcirev, pcierev;
+
+	cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+	/* get chipcommon chipstatus */
+	if (sii->pub.ccrev >= 11)
+		sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilities */
+	sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+	/* get chipcommon extended capabilities */
+
+	if (sii->pub.ccrev >= 35)
+		sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+	/* get pmu rev and caps */
+	if (sii->pub.cccaps & CC_CAP_PMU) {
+		sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+	}
+
+	SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+		sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+		sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+	sii->pub.buscoretype = NODEV_CORE_ID;
+	sii->pub.buscorerev = NOREV;
+	sii->pub.buscoreidx = BADIDX;
+
+	pci = pcie = FALSE;
+	pcirev = pcierev = NOREV;
+	pciidx = pcieidx = BADIDX;
+
+	for (i = 0; i < sii->numcores; i++) {
+		uint cid, crev;
+
+		si_setcoreidx(&sii->pub, i);
+		cid = si_coreid(&sii->pub);
+		crev = si_corerev(&sii->pub);
+
+		/* Display cores found */
+		SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+		        i, cid, crev, sii->coresba[i], sii->regs[i]));
+
+		if (BUSTYPE(bustype) == PCI_BUS) {
+			if (cid == PCI_CORE_ID) {
+				pciidx = i;
+				pcirev = crev;
+				pci = TRUE;
+			} else if (cid == PCIE_CORE_ID) {
+				pcieidx = i;
+				pcierev = crev;
+				pcie = TRUE;
+			}
+		} else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+		           (cid == PCMCIA_CORE_ID)) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+		          (BUSTYPE(bustype) == SPI_BUS)) &&
+		         ((cid == PCMCIA_CORE_ID) ||
+		          (cid == SDIOD_CORE_ID))) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+
+		/* find the core idx before entering this func. */
+		if ((savewin && (savewin == sii->coresba[i])) ||
+		    (regs == sii->regs[i]))
+			*origidx = i;
+	}
+
+	if (pci) {
+		sii->pub.buscoretype = PCI_CORE_ID;
+		sii->pub.buscorerev = pcirev;
+		sii->pub.buscoreidx = pciidx;
+	} else if (pcie) {
+		sii->pub.buscoretype = PCIE_CORE_ID;
+		sii->pub.buscorerev = pcierev;
+		sii->pub.buscoreidx = pcieidx;
+	}
+
+	SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+	         sii->pub.buscorerev));
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+	    (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+		OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+	/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
+	 * already running.
+	 */
+	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+		    si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+			si_core_disable(&sii->pub, 0);
+	}
+
+	/* return to the original core */
+	si_setcoreidx(&sii->pub, *origidx);
+
+	return TRUE;
+}
+
+
+
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	struct si_pub *sih = &sii->pub;
+	uint32 w, savewin;
+	chipcregs_t *cc;
+	char *pvars = NULL;
+	uint origidx;
+
+	ASSERT(GOODREGS(regs));
+
+	bzero((uchar*)sii, sizeof(si_info_t));
+
+	savewin = 0;
+
+	sih->buscoreidx = BADIDX;
+
+	sii->curmap = regs;
+	sii->sdh = sdh;
+	sii->osh = osh;
+
+
+
+	/* find Chipcommon address */
+	if (bustype == PCI_BUS) {
+		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+			savewin = SI_ENUM_BASE;
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+		cc = (chipcregs_t *)regs;
+	} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+		cc = (chipcregs_t *)sii->curmap;
+	} else {
+		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+	}
+
+	sih->bustype = bustype;
+	if (bustype != BUSTYPE(bustype)) {
+		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+			bustype, BUSTYPE(bustype)));
+		return NULL;
+	}
+
+	/* bus/core/clk setup for register access */
+	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+		SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+		return NULL;
+	}
+
+	/* ChipID recognition.
+	 *   We assume we can read chipid at offset 0 from the regs arg.
+	 *   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+	 *   some way of recognizing them needs to be added here.
+	 */
+	w = R_REG(osh, &cc->chipid);
+	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as well fill in chip id, rev & pkg */
+	sih->chip = w & CID_ID_MASK;
+	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+	if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+		>> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+		CST4322_SPROM_PRESENT))) {
+		SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+		(sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+		sih->chippkg = BCM4329_182PIN_PKG_ID;
+	}
+
+	sih->issim = IS_SIM(sih->chippkg);
+
+	/* scan for cores */
+	if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+		SI_MSG(("Found chip type SB (0x%08x)\n", w));
+		sb_scan(&sii->pub, regs, devid);
+	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+		SI_MSG(("Found chip type AI (0x%08x)\n", w));
+		/* pass chipc address instead of original core base */
+		ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+		SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+		/* pass chipc address instead of original core base */
+		ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else {
+		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+		return NULL;
+	}
+	/* no cores found, bail out */
+	if (sii->numcores == 0) {
+		SI_ERROR(("si_doattach: could not find any cores\n"));
+		return NULL;
+	}
+	/* bus/core/clk setup */
+	origidx = SI_CC_IDX;
+	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+		goto exit;
+	}
+
+	/* assume current core is CC */
+	if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+	                                 (CHIPREV(sii->pub.chiprev) == 0))) {
+
+		if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+			uint clkdiv;
+			clkdiv = R_REG(osh, &cc->clkdiv);
+			/* otp_clk_div is even number, 120/14 < 9mhz */
+			clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+			W_REG(osh, &cc->clkdiv, clkdiv);
+			SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+		}
+		OSL_DELAY(10);
+	}
+
+
+	pvars = NULL;
+
+
+
+	if (sii->pub.ccrev >= 20) {
+		cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		ASSERT(cc != NULL);
+		W_REG(osh, &cc->gpiopullup, 0);
+		W_REG(osh, &cc->gpiopulldown, 0);
+		si_setcoreidx(sih, origidx);
+	}
+
+
+
+
+	return (sii);
+
+exit:
+
+	return NULL;
+}
+
+/* may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+	si_info_t *sii;
+	uint idx;
+
+
+	sii = SI_INFO(sih);
+
+	if (sii == NULL)
+		return;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS)
+		for (idx = 0; idx < SI_MAXCORES; idx++)
+			if (sii->regs[idx]) {
+				REG_UNMAP(sii->regs[idx]);
+				sii->regs[idx] = NULL;
+			}
+
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (sii != &ksii)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sii->osh != NULL) {
+		SI_ERROR(("osh is already set....\n"));
+		ASSERT(!sii->osh);
+	}
+	sii->osh = osh;
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+                          void *intrsenabled_fn, void *intr_arg)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intr_arg = intr_arg;
+	sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+	sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+	sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* save current core id.  when this function is called, the current core
+	 * must be the core which provides driver functions (il, et, wl, etc.)
+	 */
+	sii->dev_coreid = sii->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intrsoff_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_intflag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return R_REG(sii->osh, ((uint32 *)(uintptr)
+			    (sii->oob_router + OOB_STATUSA)));
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_flag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_flag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_flag(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_setint(sih, siflag);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_setint(sih, siflag);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_setint(sih, siflag);
+	else
+		ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->curidx;
+}
+
+/* return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+	si_info_t *sii;
+	uint idx;
+	uint coreid;
+	uint coreunit;
+	uint i;
+
+	sii = SI_INFO(sih);
+	coreunit = 0;
+
+	idx = sii->curidx;
+
+	ASSERT(GOODREGS(sii->curmap));
+	coreid = si_coreid(sih);
+
+	/* count the cores of our type */
+	for (i = 0; i < idx; i++)
+		if (sii->coreid[i] == coreid)
+			coreunit++;
+
+	return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corevendor(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_corevendor(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corevendor(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+	return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corerev(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_corerev(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corerev(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+	si_info_t *sii;
+	uint found;
+	uint i;
+
+	sii = SI_INFO(sih);
+
+	found = 0;
+
+	for (i = 0; i < sii->numcores; i++)
+		if (sii->coreid[i] == coreid) {
+			if (found == coreunit)
+				return (i);
+			found++;
+		}
+
+	return (BADIDX);
+}
+
+/* return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+	return (sii->numcores);
+}
+
+/* return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+
+	return (sii->curmap);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+	uint idx;
+
+	idx = si_findcoreidx(sih, coreid, coreunit);
+	if (!GOODIDX(idx))
+		return (NULL);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, idx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_setcoreidx(sih, idx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, idx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, coreidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_setcoreidx(sih, coreidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, coreidx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+/* Turn off interrupt as required by sb_setcore, before switch core */
+void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+	void *cc;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (SI_FAST(sii)) {
+		/* Overloading the origidx variable to remember the coreid;
+		 * this works because the core ids cannot be confused with
+		 * core indices.
+		 */
+		*origidx = coreid;
+		if (coreid == CC_CORE_ID)
+			return (void *)CCREGS_FAST(sii);
+		else if (coreid == sih->buscoretype)
+			return (void *)PCIEREGS(sii);
+	}
+	INTR_OFF(sii, *intr_val);
+	*origidx = sii->curidx;
+	cc = si_setcore(sih, coreid, 0);
+	ASSERT(cc != NULL);
+
+	return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+		return;
+
+	si_setcoreidx(sih, coreid);
+	INTR_RESTORE(sii, intr_val);
+}
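+
+/*
+ * Illustrative sketch (not compiled in): the switch/restore pairing around a
+ * temporary core access.  In the SI_FAST path origidx is overloaded to hold
+ * the core id, so whatever comes back through it is passed straight to
+ * si_restore_core().  The helper name is hypothetical.
+ */
+#if 0
+static void example_touch_chipcommon(si_t *sih)
+{
+	chipcregs_t *cc;
+	uint origidx, intr_val = 0;
+
+	cc = (chipcregs_t *)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+	/* ... access chipcommon registers through cc here ... */
+	si_restore_core(sih, origidx, intr_val);
+}
+#endif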
+
+int
+si_numaddrspaces(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_numaddrspaces(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_numaddrspaces(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_numaddrspaces(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspace(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_addrspace(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspace(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspacesize(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_addrspacesize(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspacesize(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_cflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_core_cflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_cflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_cflags_wo(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_core_cflags_wo(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_cflags_wo(sih, mask, val);
+	else
+		ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_sflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_core_sflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_sflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_iscoreup(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_iscoreup(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_iscoreup(sih);
+	else {
+		ASSERT(0);
+		return FALSE;
+	}
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	/* only for AI back plane chips */
+	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return (ai_wrap_reg(sih, offset, mask, val));
+	return 0;
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg(sih, coreidx, regoff, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		return ai_corereg(sih, coreidx, regoff, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corereg(sih, coreidx, regoff, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_disable(sih, bits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_core_disable(sih, bits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_reset(sih, bits, resetbits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+		ai_core_reset(sih, bits, resetbits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_reset(sih, bits, resetbits);
+}
+
+/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+	uint32 cflags;
+	int result = 0;
+
+	/* Read core control flags */
+	cflags = si_core_cflags(sih, 0, 0);
+
+	/* Set bist & fgc */
+	si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+	/* Wait for bist done */
+	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+		result = BCME_ERROR;
+
+	/* Reset core control flags */
+	si_core_cflags(sih, 0xffff, cflags);
+
+	return result;
+}
+
+static uint32
+factor6(uint32 x)
+{
+	switch (x) {
+	case CC_F6_2:	return 2;
+	case CC_F6_3:	return 3;
+	case CC_F6_4:	return 4;
+	case CC_F6_5:	return 5;
+	case CC_F6_6:	return 6;
+	case CC_F6_7:	return 7;
+	default:	return 0;
+	}
+}
+
+/* calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+	uint32 n1, n2, clock, m1, m2, m3, mc;
+
+	n1 = n & CN_N1_MASK;
+	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+	if (pll_type == PLL_TYPE6) {
+		if (m & CC_T6_MMASK)
+			return CC_T6_M1;
+		else
+			return CC_T6_M0;
+	} else if ((pll_type == PLL_TYPE1) ||
+	           (pll_type == PLL_TYPE3) ||
+	           (pll_type == PLL_TYPE4) ||
+	           (pll_type == PLL_TYPE7)) {
+		n1 = factor6(n1);
+		n2 += CC_F5_BIAS;
+	} else if (pll_type == PLL_TYPE2) {
+		n1 += CC_T2_BIAS;
+		n2 += CC_T2_BIAS;
+		ASSERT((n1 >= 2) && (n1 <= 7));
+		ASSERT((n2 >= 5) && (n2 <= 23));
+	} else if (pll_type == PLL_TYPE5) {
+		return (100000000);
+	} else
+		ASSERT(0);
+	/* PLL types 3 and 7 use BASE2 (25Mhz) */
+	if ((pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE7)) {
+		clock = CC_CLOCK_BASE2 * n1 * n2;
+	} else
+		clock = CC_CLOCK_BASE1 * n1 * n2;
+
+	if (clock == 0)
+		return 0;
+
+	m1 = m & CC_M1_MASK;
+	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+	if ((pll_type == PLL_TYPE1) ||
+	    (pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE4) ||
+	    (pll_type == PLL_TYPE7)) {
+		m1 = factor6(m1);
+		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+			m2 += CC_F5_BIAS;
+		else
+			m2 = factor6(m2);
+		m3 = factor6(m3);
+
+		switch (mc) {
+		case CC_MC_BYPASS:	return (clock);
+		case CC_MC_M1:		return (clock / m1);
+		case CC_MC_M1M2:	return (clock / (m1 * m2));
+		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
+		case CC_MC_M1M3:	return (clock / (m1 * m3));
+		default:		return (0);
+		}
+	} else {
+		ASSERT(pll_type == PLL_TYPE2);
+
+		m1 += CC_T2_BIAS;
+		m2 += CC_T2M2_BIAS;
+		m3 += CC_T2_BIAS;
+		ASSERT((m1 >= 2) && (m1 <= 7));
+		ASSERT((m2 >= 3) && (m2 <= 10));
+		ASSERT((m3 >= 2) && (m3 <= 7));
+
+		if ((mc & CC_T2MC_M1BYP) == 0)
+			clock /= m1;
+		if ((mc & CC_T2MC_M2BYP) == 0)
+			clock /= m2;
+		if ((mc & CC_T2MC_M3BYP) == 0)
+			clock /= m3;
+
+		return (clock);
+	}
+}
+
+
+/* set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+	uint nb, maxt;
+
+	if (PMUCTL_ENAB(sih)) {
+
+		if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+		    (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+			si_setcore(sih, USB20D_CORE_ID, 0);
+			si_core_disable(sih, 1);
+			si_setcore(sih, CC_CORE_ID, 0);
+		}
+
+		nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+		/* The mips compiler uses the sllv instruction,
+		 * so we specially handle the 32-bit case.
+		 */
+		if (nb == 32)
+			maxt = 0xffffffff;
+		else
+			maxt = ((1 << nb) - 1);
+
+		if (ticks == 1)
+			ticks = 2;
+		else if (ticks > maxt)
+			ticks = maxt;
+
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+	} else {
+		maxt = (1 << 28) - 1;
+		if (ticks > maxt)
+			ticks = maxt;
+
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+	}
+}
+
+/* trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+	si_watchdog(sih, wd_msticks * ms);
+}
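+
+/*
+ * Example (values from si_kattach() above): with the PMU-driven 32KHz ILP
+ * clock, wd_msticks is 32, so si_watchdog_ms(sih, 1000) programs roughly
+ * 32 * 1000 = 32000 watchdog ticks, i.e. about one second.
+ */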
+
+
+
+
+/* change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+	return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/* mask&set gpiocontrol bits */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms;
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms;
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioouten);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms;
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioout);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	/* only cores on SI_BUS share GPIOs and only application users need to
+	 * reserve/release GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already reserved */
+	if (si_gpioreservation & gpio_bitmask)
+		return 0xffffffff;
+	/* set reservation */
+	si_gpioreservation |= gpio_bitmask;
+
+	return si_gpioreservation;
+}
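+
+/*
+ * Note on the single-bit check above: gpio_bitmask & (gpio_bitmask - 1) clears
+ * the lowest set bit, so the expression is zero only when exactly one bit is
+ * set.  For example, reserving GPIO 3 passes (1 << 3), while a two-bit mask
+ * such as 0x0c is rejected with 0xffffffff.
+ */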
+
+/* release one gpio */
+/*
+ * Releasing the gpio doesn't change its current value; the last write
+ * persists until someone overwrites it.
+ */
+
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	/* only cores on SI_BUS share GPIOs and only application users need to
+	 * reserve/release GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already released */
+	if (!(si_gpioreservation & gpio_bitmask))
+		return 0xffffffff;
+
+	/* clear reservation */
+	si_gpioreservation &= ~gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+	si_info_t *sii;
+	uint regoff;
+
+	sii = SI_INFO(sih);
+	regoff = 0;
+
+	regoff = OFFSETOF(chipcregs_t, gpioin);
+	return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	si_info_t *sii;
+	uint regoff;
+
+	sii = SI_INFO(sih);
+	regoff = 0;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	si_info_t *sii;
+	uint regoff;
+
+	sii = SI_INFO(sih);
+	regoff = 0;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointmask);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	/* gpio led powersave reg */
+	return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX,
+		OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	uint offs;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 20)
+		return 0xffffffff;
+
+	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	uint offs;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	if (regtype == GPIO_REGEVT)
+		offs = OFFSETOF(chipcregs_t, gpioevent);
+	else if (regtype == GPIO_REGEVT_INTMSK)
+		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+	else if (regtype == GPIO_REGEVT_INTPOL)
+		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+	else
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+	bool level, gpio_handler_t cb, void *arg)
+{
+	si_info_t *sii;
+	gpioh_item_t *gi;
+
+	ASSERT(event);
+	ASSERT(cb != NULL);
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return NULL;
+
+	if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+		return NULL;
+
+	bzero(gi, sizeof(gpioh_item_t));
+	gi->event = event;
+	gi->handler = cb;
+	gi->arg = arg;
+	gi->level = level;
+
+	gi->next = sii->gpioh_head;
+	sii->gpioh_head = gi;
+
+	return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+	si_info_t *sii;
+	gpioh_item_t *p, *n;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return;
+
+	ASSERT(sii->gpioh_head != NULL);
+	if ((void*)sii->gpioh_head == gpioh) {
+		sii->gpioh_head = sii->gpioh_head->next;
+		MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+		return;
+	} else {
+		p = sii->gpioh_head;
+		n = p->next;
+		while (n) {
+			if ((void*)n == gpioh) {
+				p->next = n->next;
+				MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+				return;
+			}
+			p = n;
+			n = n->next;
+		}
+	}
+
+	ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+	si_info_t *sii;
+	gpioh_item_t *h;
+	uint32 level = si_gpioin(sih);
+	uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
+	uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+	uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
+
+	sii = SI_INFO(sih);
+	for (h = sii->gpioh_head; h != NULL; h = h->next) {
+		if (h->handler) {
+			uint32 status = (h->level ? level : edge) & h->event;
+			uint32 polarity = (h->level ? levelp : edgep) & h->event;
+
+			/* polarity bitval is opposite of status bitval */
+			if (status ^ polarity)
+				h->handler(status, h->arg);
+		}
+	}
+
+	si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+	si_info_t *sii;
+	uint offs;
+
+	sii = SI_INFO(sih);
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	offs = OFFSETOF(chipcregs_t, intmask);
+	return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+
+/* Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 index, uint8 mem_type)
+{
+	uint banksize, bankinfo;
+	uint bankidx = index | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+	ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+	W_REG(sii->osh, &regs->bankidx, bankidx);
+	bankinfo = R_REG(sii->osh, &regs->bankinfo);
+	banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+	return banksize;
+}
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	sii = SI_INFO(sih);
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	if (!set)
+		*enable = *protect = 0;
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+		uint32 bankidx, bankinfo;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (set) {
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+				if (*enable) {
+					bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+					if (*protect)
+						bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+				}
+				W_REG(sii->osh, &regs->bankinfo, bankinfo);
+			}
+			else if (i == 0) {
+				if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+					*enable = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+						*protect = 1;
+				}
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+	if (si_socdevram_size(sih) > 0)
+		return TRUE;
+	else
+		return FALSE;
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	sii = SI_INFO(sih);
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+/* Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	sii = SI_INFO(sih);
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev == 0)
+		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+	else if (corerev < 3) {
+		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+	} else if ((corerev <= 7) || (corerev == 12)) {
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+		if (lss != 0)
+			nb --;
+		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+		if (lss != 0)
+			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+	} else {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+void
+si_btcgpiowar(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+	chipcregs_t *cc;
+
+	sii = SI_INFO(sih);
+
+	/* Make sure the ChipCommon core is present &&
+	 * UART_TX is strapped to 1
+	 */
+	if (!(sih->cccaps & CC_CAP_UARTGPIO))
+		return;
+
+	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(cc != NULL);
+
+	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+uint
+si_pll_reset(si_t *sih)
+{
+	uint err = 0;
+
+	return (err);
+}
+
+/* check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+	uint32 w;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case PCI_BUS:
+		ASSERT(sii->osh != NULL);
+		w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+		if ((w & 0xFFFF) != VENDOR_BROADCOM)
+			return TRUE;
+		break;
+	}
+	return FALSE;
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h
new file mode 100644
index 0000000..d80246e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -0,0 +1,235 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h,v 1.17.4.3 2010-10-25 16:56:56 Exp $
+ */
+
+#ifndef	_siutils_priv_h_
+#define	_siutils_priv_h_
+
+#define	SI_ERROR(args)
+
+#define	SI_MSG(args)
+
+/* Define SI_VMSG to printf for verbose debugging, but don't check it in */
+#define	SI_VMSG(args)
+
+#define	IS_SIM(chippkg)	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+	void			*arg;
+	bool			level;
+	gpio_handler_t		handler;
+	uint32			event;
+	struct gpioh_item	*next;
+} gpioh_item_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_info {
+	struct si_pub pub;		/* backplane public state (must be first field) */
+
+	void	*osh;			/* osl os handle */
+	void	*sdh;			/* bcmsdh handle */
+
+	uint	dev_coreid;		/* the core provides driver functions */
+	void	*intr_arg;		/* interrupt callback function arg */
+	si_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
+	si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+	si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+	void *pch;			/* PCI/E core handle */
+
+	gpioh_item_t *gpioh_head; 	/* GPIO event handlers list */
+
+	bool	memseg;			/* flag to toggle MEM_SEG register */
+
+	char *vars;
+	uint varsz;
+
+	void	*curmap;		/* current regs va */
+	void	*regs[SI_MAXCORES];	/* other regs va */
+
+	uint	curidx;			/* current core index */
+	uint	numcores;		/* # discovered cores */
+	uint	coreid[SI_MAXCORES];	/* id of each core */
+	uint32	coresba[SI_MAXCORES];	/* backplane address of each core */
+	void	*regs2[SI_MAXCORES];	/* va of each core second register set (usbh20) */
+	uint32	coresba2[SI_MAXCORES];	/* address of each core second register set (usbh20) */
+	uint32	coresba_size[SI_MAXCORES]; /* backplane address space size */
+	uint32	coresba2_size[SI_MAXCORES]; /* second address space size */
+
+	void	*curwrap;		/* current wrapper va */
+	void	*wrappers[SI_MAXCORES];	/* other cores wrapper va */
+	uint32	wrapba[SI_MAXCORES];	/* address of controlling wrapper */
+
+	uint32	cia[SI_MAXCORES];	/* erom cia entry for each core */
+	uint32	cib[SI_MAXCORES];	/* erom cib entry for each core */
+	uint32	oob_router;		/* oob router registers for axi */
+} si_info_t;
+
+#define	SI_INFO(sih)	(si_info_t *)(uintptr)sih
+
+#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+		ISALIGNED((x), SI_CORE_SIZE))
+#define	GOODREGS(regs)	((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR	0
+#define	GOODIDX(idx)	(((uint)idx) < SI_MAXCORES)
+#define	NOREV		-1		/* Invalid rev */
+
+#define PCI(si)		((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCI_CORE_ID))
+#define PCIE(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE_CORE_ID))
+#define PCMCIA(si)	((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access the PCI/PCIE and CC cores without requiring a change
+ * to the PCI BAR0 WIN
+ */
+#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) ||	\
+		     (((si)->pub.buscoretype == PCI_CORE_ID) && (si)->pub.buscorerev >= 13))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
+ * before/after core switching, to avoid invalid register accesses inside the ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+	if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+	if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
+
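+/*
+ * Usage sketch (illustrative only, mirroring si_btcgpiowar() and the SOCRAM
+ * routines in siutils.c): bracket every core switch with these macros so the
+ * function core's ISR cannot run while its registers are switched away.
+ *
+ *	si_info_t *sii = SI_INFO(sih);
+ *	uint intr_val = 0;
+ *	uint origidx;
+ *
+ *	INTR_OFF(sii, intr_val);
+ *	origidx = si_coreidx(sih);
+ *	(void)si_setcore(sih, CC_CORE_ID, 0);
+ *	... access the selected core's registers ...
+ *	si_setcoreidx(sih, origidx);
+ *	INTR_RESTORE(sii, intr_val);
+ */
+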
+/* dynamic clock control defines */
+#define	LPOMINFREQ		25000		/* low power oscillator min */
+#define	LPOMAXFREQ		43000		/* low power oscillator max */
+#define	XTALMINFREQ		19800000	/* 20 MHz - 1% */
+#define	XTALMAXFREQ		20200000	/* 20 MHz + 1% */
+#define	PCIMINFREQ		25000000	/* 25 MHz */
+#define	PCIMAXFREQ		34000000	/* 33 MHz + fudge */
+
+#define	ILP_DIV_5MHZ		0		/* ILP = 5 MHz */
+#define	ILP_DIV_1MHZ		4		/* ILP = 1 MHz */
+
+#define PCI_FORCEHT(si)	\
+	(((PCIE(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+	((PCI(si) || PCIE(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
+	(PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME	10		/* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME	90		/* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+
+
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b)  (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif	/* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h
new file mode 100644
index 0000000..c51c68c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/uamp_api.h
@@ -0,0 +1,176 @@
+/*
+ *  Name:       uamp_api.h
+ *
+ *  Description: Universal AMP API
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: uamp_api.h,v 1.2.8.1 2011-02-05 00:16:14 Exp $
+ *
+ */
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+**  Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool	BOOLEAN;
+typedef uint8	UINT8;
+typedef uint16	UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1   1
+#define UAMP_ID_2   2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY           0   /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED       1   /* Controller removed */
+#define UAMP_EVT_CTLR_READY         2   /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD            0   /* HCI Command channel */
+#define UAMP_CH_HCI_EVT            1   /* HCI Event channel */
+#define UAMP_CH_HCI_DATA           2   /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+    tUAMP_CH channel;       /* UAMP_EVT_RX_READY: channel for which rx occurred */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters   amp_id:         AMP device identifier that generated the event
+**              amp_evt:        event id
+**              p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
+
+/*****************************************************************************
+**  external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function:    UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters   p_cback:    Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters   amp_id: Application-specific AMP identifier. This value
+**                      will be included in AMP messages sent to the
+**                      BTU task, to identify the source of the message
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters   amp_id: Application-specific AMP identifier.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters:  amp_id:     AMP identifier.
+**              p_buf:      pointer to buffer to write
+**              num_bytes:  number of bytes to write
+**              channel:    UAMP_CH_HCI_DATA or UAMP_CH_HCI_CMD
+**
+** Returns:     number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+**              UAMP_EVT_RX_READY callback event.
+**
+** Parameters:  amp_id:     AMP identifier.
+**              p_buf:      pointer to buffer for holding incoming AMP data
+**              buf_size:   size of p_buf
+**              channel:    UAMP_CH_HCI_DATA or UAMP_CH_HCI_EVT
+**
+** Returns:     number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
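+
+/*****************************************************************************
+**
+** Usage sketch (illustrative only, not part of the UAMP API): a minimal client
+** that registers a callback with UAMP_Init(), sends an HCI command on the
+** command channel and drains UAMP_EVT_RX_READY notifications via UAMP_Read().
+** The buffer size is a placeholder; hci_reset[] carries the 3-byte HCI_Reset
+** command purely as an example payload.
+**
+**   static UINT8 rx_buf[1024];
+**
+**   static void uamp_cback(tUAMP_ID amp_id, tUAMP_EVT amp_evt,
+**                          tUAMP_EVT_DATA *p_amp_evt_data)
+**   {
+**       if (amp_evt == UAMP_EVT_RX_READY)
+**           (void)UAMP_Read(amp_id, rx_buf, sizeof(rx_buf),
+**                           p_amp_evt_data->channel);
+**   }
+**
+**   void uamp_example(void)
+**   {
+**       UINT8 hci_reset[] = { 0x03, 0x0c, 0x00 };
+**
+**       if (!UAMP_Init(uamp_cback))
+**           return;
+**       if (UAMP_Open(UAMP_ID_1)) {
+**           UAMP_Write(UAMP_ID_1, hci_reset, sizeof(hci_reset), UAMP_CH_HCI_CMD);
+**           UAMP_Close(UAMP_ID_1);
+**       }
+**   }
+**
+******************************************************************************
+*/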
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 0000000..14bd828
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,591 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <bcmsdbus.h>
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/platform_device.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+#include <linux/wlan_plat.h>
+#else
+#include <linux/wifi_tiwlan.h>
+#endif
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START		"START"
+#define CMD_STOP		"STOP"
+#define	CMD_SCAN_ACTIVE		"SCAN-ACTIVE"
+#define	CMD_SCAN_PASSIVE	"SCAN-PASSIVE"
+#define CMD_RSSI		"RSSI"
+#define CMD_LINKSPEED		"LINKSPEED"
+#define CMD_RXFILTER_START	"RXFILTER-START"
+#define CMD_RXFILTER_STOP	"RXFILTER-STOP"
+#define CMD_RXFILTER_ADD	"RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE	"RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START	"BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP	"BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE		"BTCOEXMODE"
+#define CMD_SETSUSPENDOPT	"SETSUSPENDOPT"
+#define CMD_SETFWPATH		"SETFWPATH"
+#define CMD_SETBAND		"SETBAND"
+#define CMD_GETBAND		"GETBAND"
+
+typedef struct android_wifi_priv_cmd {
+	char *buf;
+	int used_len;
+	int total_len;
+} android_wifi_priv_cmd;
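+
+/*
+ * Userspace sketch (illustrative only): how one of the command strings above is
+ * handed to wl_android_priv_cmd(). The driver expects ifr_data to point at an
+ * android_wifi_priv_cmd describing the command buffer, and it copies the reply
+ * back into that same buffer. The ioctl number (SIOCDEVPRIVATE + 1), "wlan0"
+ * and sock_fd (an open AF_INET socket) are assumptions, not defined here.
+ *
+ *	char buf[64] = "RSSI";
+ *	android_wifi_priv_cmd priv_cmd = {
+ *		.buf = buf,
+ *		.used_len = 0,
+ *		.total_len = sizeof(buf),
+ *	};
+ *	struct ifreq ifr;
+ *
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&priv_cmd;
+ *	if (ioctl(sock_fd, SIOCDEVPRIVATE + 1, &ifr) == 0)
+ *		printf("reply: %s\n", buf);
+ */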
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+void dhd_customer_gpio_wlan_ctrl(int onoff);
+uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+void dhd_dev_init_ioctl(struct net_device *dev);
+int net_os_set_dtim_skip(struct net_device *dev, int val);
+int net_os_set_suspend_disable(struct net_device *dev, int val);
+int net_os_set_suspend(struct net_device *dev, int val);
+
+extern bool ap_fw_loaded;
+#ifdef CUSTOMER_HW2
+extern char iface_name[IFNAMSIZ];
+#endif
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so that dhd_bus_start is called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests are handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+	int link_speed;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_link_speed(net, &link_speed);
+	if (error)
+		return -1;
+
+	/* Convert Kbps to Android Mbps */
+	link_speed = link_speed / 1000;
+	bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+	DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+	return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+	wlc_ssid_t ssid;
+	int rssi;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_rssi(net, &rssi);
+	if (error)
+		return -1;
+
+	error = wldev_get_ssid(net, &ssid);
+	if (error)
+		return -1;
+	memcpy(command, ssid.SSID, ssid.SSID_len);
+	bytes_written = ssid.SSID_len;
+	bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", rssi);
+	DHD_INFO(("%s: command result is %s \n", __FUNCTION__, command));
+	return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+	ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+	if (ret_now != suspend_flag) {
+		if (!(ret = net_os_set_suspend(dev, ret_now)))
+			DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+					__FUNCTION__, ret_now, suspend_flag));
+		else
+			DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+	return ret;
+}
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+	uint band;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_band(dev, &band);
+	if (error)
+		return -1;
+	bytes_written = snprintf(command, total_len, "Band %d", band);
+	return bytes_written;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+int wl_android_wifi_on(struct net_device *dev)
+{
+	int ret = 0;
+
+	printk("%s in\n", __FUNCTION__);
+	if (!dev) {
+		DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (!g_wifi_on) {
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+		sdioh_start(NULL, 0);
+		ret = dhd_dev_reset(dev, FALSE);
+		sdioh_start(NULL, 1);
+		dhd_dev_init_ioctl(dev);
+		g_wifi_on = 1;
+	}
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+int wl_android_wifi_off(struct net_device *dev)
+{
+	int ret = 0;
+
+	printk("%s in\n", __FUNCTION__);
+	if (!dev) {
+		DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (g_wifi_on) {
+		dhd_dev_reset(dev, 1);
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+		sdioh_stop(NULL);
+		/* clean up dtim_skip setting */
+		net_os_set_dtim_skip(dev, TRUE);
+		g_wifi_on = 0;
+	}
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+	if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+		return -1;
+	bcm_strncpy_s(fw_path, sizeof(fw_path),
+		command + strlen(CMD_SETFWPATH) + 1, MOD_PARAM_PATHLEN - 1);
+	if (strstr(fw_path, "apsta") != NULL) {
+		DHD_INFO(("GOT APSTA FIRMWARE\n"));
+		ap_fw_loaded = TRUE;
+	} else {
+		DHD_INFO(("GOT STA FIRMWARE\n"));
+		ap_fw_loaded = FALSE;
+	}
+	return 0;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+	char *command = NULL;
+	int bytes_written = 0;
+	android_wifi_priv_cmd *priv_cmd;
+
+	/* net_os_wake_lock(dev); */
+
+	priv_cmd = (android_wifi_priv_cmd*)ifr->ifr_data;
+	if (!priv_cmd)
+	{
+		ret = -EINVAL;
+		goto exit;
+	}
+	command = kmalloc(priv_cmd->total_len, GFP_KERNEL);
+	if (!command)
+	{
+		DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (copy_from_user(command, priv_cmd->buf, priv_cmd->total_len)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+	if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+		DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+		bytes_written = wl_android_wifi_on(net);
+	}
+	else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+		bytes_written = wl_android_set_fwpath(net, command, priv_cmd->total_len);
+	}
+
+	if (!g_wifi_on) {
+		DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+				__FUNCTION__, command, ifr->ifr_name));
+		ret = 0;
+		goto exit;
+	}
+
+	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+		bytes_written = wl_android_wifi_off(net);
+	}
+	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+		/* TBD: SCAN-ACTIVE */
+	}
+	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+		/* TBD: SCAN-PASSIVE */
+	}
+	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+		bytes_written = wl_android_get_rssi(net, command, priv_cmd->total_len);
+	}
+	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+		bytes_written = wl_android_get_link_speed(net, command, priv_cmd->total_len);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+		bytes_written = net_os_set_packet_filter(net, 1);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+		bytes_written = net_os_set_packet_filter(net, 0);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+		/* TBD: BTCOEXSCAN-START */
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+		/* TBD: BTCOEXSCAN-STOP */
+	}
+	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+		/* TBD: BTCOEXMODE */
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd->total_len);
+	}
+	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+		bytes_written = wldev_set_band(net, band);
+	}
+	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+		bytes_written = wl_android_get_band(net, command, priv_cmd->total_len);
+	} else {
+		DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+		snprintf(command, 3, "OK");
+		bytes_written = strlen("OK");
+	}
+
+	if (bytes_written > 0) {
+		bytes_written++;
+		priv_cmd->used_len = bytes_written;
+		if (copy_to_user(priv_cmd->buf, command, bytes_written)) {
+			DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+			ret = -EFAULT;
+		}
+	} else {
+		ret = bytes_written;
+	}
+
+exit:
+	/* net_os_wake_unlock(dev); */
+	if (command) {
+		kfree(command);
+	}
+
+	return ret;
+}
+
+int wl_android_init(void)
+{
+	int ret = 0;
+
+	dhd_msg_level = DHD_ERROR_VAL;
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+	dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+#ifdef CUSTOMER_HW2
+	if (!iface_name[0])
+		bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+#endif /* CUSTOMER_HW2 */
+	return ret;
+}
+
+int wl_android_exit(void)
+{
+	int ret = 0;
+
+	return ret;
+}
+
+
+/**
+ * Functions for Android WiFi card detection
+ */
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+
+static int g_wifidev_registered = 0;
+static struct semaphore wifi_control_sem;
+static struct wifi_platform_data *wifi_control_data = NULL;
+static struct resource *wifi_irqres = NULL;
+
+static int wifi_add_dev(void);
+static void wifi_del_dev(void);
+
+int wl_android_wifictrl_func_add(void)
+{
+	int ret = 0;
+	sema_init(&wifi_control_sem, 0);
+
+	ret = wifi_add_dev();
+	if (ret) {
+		DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
+		return ret;
+	}
+	g_wifidev_registered = 1;
+
+	/* Wait for the probe callback after platform_driver_register, or exit with error on timeout */
+	if (down_timeout(&wifi_control_sem,  msecs_to_jiffies(1000)) != 0) {
+		ret = -EINVAL;
+		DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
+	}
+
+	return ret;
+}
+
+void wl_android_wifictrl_func_del(void)
+{
+	if (g_wifidev_registered)
+	{
+		wifi_del_dev();
+		g_wifidev_registered = 0;
+	}
+}
+
+void* wl_android_prealloc(int section, unsigned long size)
+{
+	void *alloc_ptr = NULL;
+	if (wifi_control_data && wifi_control_data->mem_prealloc) {
+		alloc_ptr = wifi_control_data->mem_prealloc(section, size);
+		if (alloc_ptr) {
+			DHD_INFO(("success alloc section %d\n", section));
+			bzero(alloc_ptr, size);
+			return alloc_ptr;
+		}
+	}
+
+	DHD_ERROR(("can't alloc section %d\n", section));
+	return 0;
+}
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr)
+{
+	if (wifi_irqres) {
+		*irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
+		return (int)wifi_irqres->start;
+	}
+#ifdef CUSTOM_OOB_GPIO_NUM
+	return CUSTOM_OOB_GPIO_NUM;
+#else
+	return -1;
+#endif
+}
+
+int wifi_set_power(int on, unsigned long msec)
+{
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (wifi_control_data && wifi_control_data->set_power) {
+		wifi_control_data->set_power(on);
+	}
+	if (msec)
+		mdelay(msec);
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+int wifi_get_mac_addr(unsigned char *buf)
+{
+	DHD_ERROR(("%s\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+	if (wifi_control_data && wifi_control_data->get_mac_addr) {
+		return wifi_control_data->get_mac_addr(buf);
+	}
+	return -EOPNOTSUPP;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+void *wifi_get_country_code(char *ccode)
+{
+	DHD_TRACE(("%s\n", __FUNCTION__));
+	if (!ccode)
+		return NULL;
+	if (wifi_control_data && wifi_control_data->get_country_code) {
+		return wifi_control_data->get_country_code(ccode);
+	}
+	return NULL;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+static int wifi_set_carddetect(int on)
+{
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (wifi_control_data && wifi_control_data->set_carddetect) {
+		wifi_control_data->set_carddetect(on);
+	}
+	return 0;
+}
+
+static int wifi_probe(struct platform_device *pdev)
+{
+	struct wifi_platform_data *wifi_ctrl =
+		(struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	DHD_ERROR(("## %s\n", __FUNCTION__));
+	wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+	if (wifi_irqres == NULL)
+		wifi_irqres = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, "bcm4329_wlan_irq");
+	wifi_control_data = wifi_ctrl;
+
+	wifi_set_power(1, 0);	/* Power On */
+	wifi_set_carddetect(1);	/* CardDetect (0->1) */
+
+	up(&wifi_control_sem);
+	return 0;
+}
+
+static int wifi_remove(struct platform_device *pdev)
+{
+	struct wifi_platform_data *wifi_ctrl =
+		(struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	DHD_ERROR(("## %s\n", __FUNCTION__));
+	wifi_control_data = wifi_ctrl;
+
+	wifi_set_power(0, 0);	/* Power Off */
+	wifi_set_carddetect(0);	/* CardDetect (1->0) */
+
+	up(&wifi_control_sem);
+	return 0;
+}
+
+static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static int wifi_resume(struct platform_device *pdev)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static struct platform_driver wifi_device = {
+	.probe          = wifi_probe,
+	.remove         = wifi_remove,
+	.suspend        = wifi_suspend,
+	.resume         = wifi_resume,
+	.driver         = {
+	.name   = "bcmdhd_wlan",
+	}
+};
+
+static struct platform_driver wifi_device_legacy = {
+	.probe          = wifi_probe,
+	.remove         = wifi_remove,
+	.suspend        = wifi_suspend,
+	.resume         = wifi_resume,
+	.driver         = {
+	.name   = "bcm4329_wlan",
+	}
+};
+
+static int wifi_add_dev(void)
+{
+	DHD_TRACE(("## Calling platform_driver_register\n"));
+	platform_driver_register(&wifi_device);
+	platform_driver_register(&wifi_device_legacy);
+	return 0;
+}
+
+static void wifi_del_dev(void)
+{
+	DHD_TRACE(("## Unregister platform_driver_register\n"));
+	platform_driver_unregister(&wifi_device);
+	platform_driver_unregister(&wifi_device_legacy);
+}
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 0000000..3c75bfb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.h
@@ -0,0 +1,39 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_android.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 linm Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+
+/**
+ * Android platform-dependent functions. Feel free to add Android-specific functions here
+ * (keep the macros in dhd). Please do NOT declare functions here that are NOT exposed to
+ * dhd or cfg; define them as static in wl_android.c instead.
+ */
+
+/**
+ * wl_android_init is called from the module init function (currently dhd_module_init);
+ * similarly, wl_android_exit is called from the module exit function (currently dhd_module_cleanup).
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+int wl_android_wifictrl_func_add(void);
+void wl_android_wifictrl_func_del(void);
+void* wl_android_prealloc(int section, unsigned long size);
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
\ No newline at end of file
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
new file mode 100644
index 0000000..ef5cd47
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -0,0 +1,5899 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+
+#include <bcmutils.h>
+#include <bcmwifi.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+
+#include <net/rtnetlink.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/firmware.h>
+#include <bcmsdbus.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+static struct sdio_func *cfg80211_sdio_func;
+static struct wl_dev *wl_cfg80211_dev;
+
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define WL_4329_FW_FILE "brcm/bcm4329-fullmac-4-218-248-5.bin"
+#define WL_4329_NVRAM_FILE "brcm/bcm4329-fullmac-4-218-248-5.txt"
+#define WL_TRACE(a) do { printk("%s ", __FUNCTION__); printk a; } while (0)
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAX_WAIT_TIME 3000
+static s8 ioctlbuf[WLC_IOCTL_MAXLEN];
+
+#ifdef CONFIG_SYSCTL
+#define MAC_STRING_LEN (sizeof(u8) * 17)
+u8 wl_sysctl_macstring[2][MAC_STRING_LEN];
+
+static ctl_table wl_sysctl_child[] = {
+	{
+	.procname = "p2p_dev_addr",
+	.data = &wl_sysctl_macstring[0],
+	.maxlen = MAC_STRING_LEN,
+	.mode =  0444,
+	.child = NULL,
+	.proc_handler = proc_dostring,
+	},
+	{
+	.procname = "p2p_int_addr",
+	.data = &wl_sysctl_macstring[1],
+	.maxlen = MAC_STRING_LEN,
+	.mode =  0444,
+	.child = NULL,
+	.proc_handler = proc_dostring,
+	},
+	{0}
+};
+static ctl_table wl_sysctl_table[] = {
+	{
+	.procname = "wifi",
+	.data = NULL,
+	.maxlen = 0,
+	.mode =  0555,
+	.child = wl_sysctl_child,
+	.proc_handler = NULL,
+	},
+	{0}
+};
+static struct ctl_table_header *wl_sysctl_hdr;
+#endif /* CONFIG_SYSCTL */ 
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS     0x1008
+#define WPS_ID_REQ_TYPE           0x103A
+#define WPS_ID_DEVICE_NAME        0x1011
+#define WPS_ID_VERSION            0x104A
+#define WPS_ID_DEVICE_PWD_ID      0x1012
+#define WPS_ID_REQ_DEV_TYPE       0x106A
+#define WPS_ID_PRIM_DEV_TYPE      0x1054
+
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid);
+static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request);
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+	struct net_device *dev);
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac,
+	struct station_info *sinfo);
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+	struct net_device *dev, bool enabled,
+	s32 timeout);
+static s32 wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
+	struct net_device *dev,
+	const u8 *addr,
+	const struct cfg80211_bitrate_mask *mask);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code);
+static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type,
+	s32 dbm);
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	void *cookie, void (*callback) (void *cookie,
+	struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev,	u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+	struct net_device *dev);
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct wl_priv *wl);
+static void wl_destroy_event_handler(struct wl_priv *wl);
+static s32 wl_event_handler(void *data);
+static void wl_init_eq(struct wl_priv *wl);
+static void wl_flush_eq(struct wl_priv *wl);
+static void wl_lock_eq(struct wl_priv *wl);
+static void wl_unlock_eq(struct wl_priv *wl);
+static void wl_init_eq_lock(struct wl_priv *wl);
+static void wl_init_event_handler(struct wl_priv *wl);
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl);
+static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 type,
+	const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct wl_priv *wl);
+static s32 wl_notify_connect_status(struct wl_priv *wl,
+	struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct wl_priv *wl,
+	struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+
+/*
+ * register/deregister sdio function
+ */
+struct sdio_func *wl_cfg80211_get_sdio_func(void);
+static void wl_clear_sdio_func(void);
+
+/*
+ * ioctl utilities
+ */
+static s32 wl_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
+	s32 buf_len);
+static __used s32 wl_dev_bufvar_set(struct net_device *dev, s8 *name,
+	s8 *buf, s32 len);
+static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val);
+static s32 wl_dev_intvar_get(struct net_device *dev, s8 *name,
+	s32 *retval);
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * wl profile utilities
+ */
+static s32 wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e,
+	void *data, s32 item);
+static void *wl_read_prof(struct wl_priv *wl, s32 item);
+static void wl_init_prof(struct wl_profile *prof);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev);
+static void wl_ch_to_chanspec(int ch,
+	struct wl_join_params *join_params, size_t *join_params_size);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct wl_priv *wl);
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v);
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct wl_priv *wl);
+
+static s32 wl_mode_to_nl80211_iftype(s32 mode);
+
+static struct wireless_dev *wl_alloc_wdev(s32 sizeof_iface,
+	struct device *dev);
+static void wl_free_wdev(struct wl_priv *wl);
+
+static s32 wl_inform_bss(struct wl_priv *wl);
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi);
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev);
+
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr,
+	struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * wl_priv memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct wl_priv *wl);
+static void wl_deinit_priv_mem(struct wl_priv *wl);
+
+static void wl_delay(u32 ms);
+
+/*
+ * store/restore cfg80211 instance data
+ */
+static void wl_set_drvdata(struct wl_dev *dev, void *data);
+static void *wl_get_drvdata(struct wl_dev *dev);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct wl_priv *wl);
+
+/*
+ * dongle up/down , default configuration utilities
+ */
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev);
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e);
+static void wl_link_up(struct wl_priv *wl);
+static void wl_link_down(struct wl_priv *wl);
+static s32 wl_dongle_mode(struct wl_priv *wl, struct net_device *ndev, s32 iftype);
+static s32 __wl_cfg80211_up(struct wl_priv *wl);
+static s32 __wl_cfg80211_down(struct wl_priv *wl);
+static s32 wl_dongle_probecap(struct wl_priv *wl);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_dongle_eventmsg(struct net_device *ndev);
+
+/*
+ * dongle configuration utilities
+ */
+#ifndef EMBEDDED_PLATFORM
+static s32 wl_dongle_country(struct net_device *ndev, u8 ccode);
+static s32 wl_dongle_up(struct net_device *ndev, u32 up);
+static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode);
+static s32 wl_dongle_glom(struct net_device *ndev, u32 glom,
+	u32 dongle_align);
+static s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar,
+	u32 bcn_timeout);
+static s32 wl_dongle_eventmsg(struct net_device *ndev);
+static s32 wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+	s32 scan_unassoc_time);
+static s32 wl_dongle_offload(struct net_device *ndev, s32 arpoe,
+	s32 arp_ol);
+static s32 wl_pattern_atoh(s8 *src, s8 *dst);
+static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode);
+static s32 wl_update_wiphybands(struct wl_priv *wl);
+#endif				/* !EMBEDDED_PLATFORM */
+static s32 wl_config_dongle(struct wl_priv *wl, bool need_lock);
+
+/*
+ * iscan handler
+ */
+static void wl_iscan_timer(unsigned long data);
+static void wl_term_iscan(struct wl_priv *wl);
+static s32 wl_init_iscan(struct wl_priv *wl);
+static s32 wl_iscan_thread(void *data);
+static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct wlc_ssid *ssid,
+	u16 action);
+static s32 wl_do_iscan(struct wl_priv *wl);
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan);
+static s32 wl_invoke_iscan(struct wl_priv *wl);
+static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+	struct wl_scan_results **bss_list);
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted);
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan);
+static s32 wl_iscan_done(struct wl_priv *wl);
+static s32 wl_iscan_pending(struct wl_priv *wl);
+static s32 wl_iscan_inprogress(struct wl_priv *wl);
+static s32 wl_iscan_aborted(struct wl_priv *wl);
+
+/*
+ * fw/nvram downloading handler
+ */
+static void wl_init_fw(struct wl_fw_ctrl *fw);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * update pmklist to dongle
+ */
+static __used s32 wl_update_pmklist(struct net_device *dev,
+	struct wl_pmk_list *pmk_list, s32 err);
+
+/*
+ * debugfs support
+ */
+static int wl_debugfs_add_netdev_params(struct wl_priv *wl);
+static void wl_debugfs_remove_netdev(struct wl_priv *wl);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct wl_priv *wl);
+static int wl_rfkill_set(void *data, bool blocked);
+
+#define WL_PRIV_GET() 							\
+	({								\
+	struct wl_iface *ci = NULL;					\
+	if (unlikely(!(wl_cfg80211_dev && 				\
+		(ci = wl_get_drvdata(wl_cfg80211_dev))))) {		\
+		WL_ERR(("wl_cfg80211_dev is unavailable\n"));		\
+		BUG();							\
+	} 								\
+	ci_to_wl(ci);							\
+})
+
+#define CHECK_SYS_UP()							\
+do {									\
+	struct wl_priv *wl = WL_PRIV_GET();			\
+	if (unlikely(!test_bit(WL_STATUS_READY, &wl->status))) {	\
+		WL_INFO(("device is not ready : status (%d)\n",		\
+			(int)wl->status));				\
+		return -EIO;						\
+	}								\
+} while (0)
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX	50
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+	"SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+	"DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+	"REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+	"BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+	"TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+	"EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+	"BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+	"PFN_NET_LOST",
+	"RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
+	"IBSS_ASSOC",
+	"RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+	"PROBREQ_MSG",
+	"SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
+	"EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
+	"UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE",
+	"WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE",
+	"RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG",
+	"ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND",
+	"WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED",
+	"WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT",
+	"WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE",
+	"WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP",
+	"WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE"
+};
+#endif				/* WL_DBG_LEVEL */
+
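+/* Initializers for the ieee80211_channel tables exported to cfg80211 below */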
+#define CHAN2G(_channel, _freq, _flags) {			\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define CHAN5G(_channel, _flags) {				\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
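+/* Firmware rate IDs are in 500 kbps units; cfg80211 bitrates are in 100 kbps */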
+#define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+	{								\
+		.bitrate	= RATE_TO_BASE100KBPS(_rateid),     \
+		.hw_value	= (_rateid),			    \
+		.flags	  = (_flags),			     \
+	}
+
+static struct ieee80211_rate __wl_rates[] = {
+	RATETAB_ENT(WLC_RATE_1M, 0),
+	RATETAB_ENT(WLC_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(WLC_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(WLC_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(WLC_RATE_6M, 0),
+	RATETAB_ENT(WLC_RATE_9M, 0),
+	RATETAB_ENT(WLC_RATE_12M, 0),
+	RATETAB_ENT(WLC_RATE_18M, 0),
+	RATETAB_ENT(WLC_RATE_24M, 0),
+	RATETAB_ENT(WLC_RATE_36M, 0),
+	RATETAB_ENT(WLC_RATE_48M, 0),
+	RATETAB_ENT(WLC_RATE_54M, 0)
+};
+
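+/* The 5 GHz (a) table skips the four CCK rates; 2.4 GHz (g) exposes all twelve */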
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size	8
+#define wl_g_rates		(__wl_rates + 0)
+#define wl_g_rates_size	12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+	CHAN5G(34, 0), CHAN5G(36, 0),
+	CHAN5G(38, 0), CHAN5G(40, 0),
+	CHAN5G(42, 0), CHAN5G(44, 0),
+	CHAN5G(46, 0), CHAN5G(48, 0),
+	CHAN5G(52, 0), CHAN5G(56, 0),
+	CHAN5G(60, 0), CHAN5G(64, 0),
+	CHAN5G(100, 0), CHAN5G(104, 0),
+	CHAN5G(108, 0), CHAN5G(112, 0),
+	CHAN5G(116, 0), CHAN5G(120, 0),
+	CHAN5G(124, 0), CHAN5G(128, 0),
+	CHAN5G(132, 0), CHAN5G(136, 0),
+	CHAN5G(140, 0), CHAN5G(149, 0),
+	CHAN5G(153, 0), CHAN5G(157, 0),
+	CHAN5G(161, 0), CHAN5G(165, 0),
+	CHAN5G(184, 0), CHAN5G(188, 0),
+	CHAN5G(192, 0), CHAN5G(196, 0),
+	CHAN5G(200, 0), CHAN5G(204, 0),
+	CHAN5G(208, 0), CHAN5G(212, 0),
+	CHAN5G(216, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_n_channels[] = {
+	CHAN5G(32, 0), CHAN5G(34, 0),
+	CHAN5G(36, 0), CHAN5G(38, 0),
+	CHAN5G(40, 0), CHAN5G(42, 0),
+	CHAN5G(44, 0), CHAN5G(46, 0),
+	CHAN5G(48, 0), CHAN5G(50, 0),
+	CHAN5G(52, 0), CHAN5G(54, 0),
+	CHAN5G(56, 0), CHAN5G(58, 0),
+	CHAN5G(60, 0), CHAN5G(62, 0),
+	CHAN5G(64, 0), CHAN5G(66, 0),
+	CHAN5G(68, 0), CHAN5G(70, 0),
+	CHAN5G(72, 0), CHAN5G(74, 0),
+	CHAN5G(76, 0), CHAN5G(78, 0),
+	CHAN5G(80, 0), CHAN5G(82, 0),
+	CHAN5G(84, 0), CHAN5G(86, 0),
+	CHAN5G(88, 0), CHAN5G(90, 0),
+	CHAN5G(92, 0), CHAN5G(94, 0),
+	CHAN5G(96, 0), CHAN5G(98, 0),
+	CHAN5G(100, 0), CHAN5G(102, 0),
+	CHAN5G(104, 0), CHAN5G(106, 0),
+	CHAN5G(108, 0), CHAN5G(110, 0),
+	CHAN5G(112, 0), CHAN5G(114, 0),
+	CHAN5G(116, 0), CHAN5G(118, 0),
+	CHAN5G(120, 0), CHAN5G(122, 0),
+	CHAN5G(124, 0), CHAN5G(126, 0),
+	CHAN5G(128, 0), CHAN5G(130, 0),
+	CHAN5G(132, 0), CHAN5G(134, 0),
+	CHAN5G(136, 0), CHAN5G(138, 0),
+	CHAN5G(140, 0), CHAN5G(142, 0),
+	CHAN5G(144, 0), CHAN5G(145, 0),
+	CHAN5G(146, 0), CHAN5G(147, 0),
+	CHAN5G(148, 0), CHAN5G(149, 0),
+	CHAN5G(150, 0), CHAN5G(151, 0),
+	CHAN5G(152, 0), CHAN5G(153, 0),
+	CHAN5G(154, 0), CHAN5G(155, 0),
+	CHAN5G(156, 0), CHAN5G(157, 0),
+	CHAN5G(158, 0), CHAN5G(159, 0),
+	CHAN5G(160, 0), CHAN5G(161, 0),
+	CHAN5G(162, 0), CHAN5G(163, 0),
+	CHAN5G(164, 0), CHAN5G(165, 0),
+	CHAN5G(166, 0), CHAN5G(168, 0),
+	CHAN5G(170, 0), CHAN5G(172, 0),
+	CHAN5G(174, 0), CHAN5G(176, 0),
+	CHAN5G(178, 0), CHAN5G(180, 0),
+	CHAN5G(182, 0), CHAN5G(184, 0),
+	CHAN5G(186, 0), CHAN5G(188, 0),
+	CHAN5G(190, 0), CHAN5G(192, 0),
+	CHAN5G(194, 0), CHAN5G(196, 0),
+	CHAN5G(198, 0), CHAN5G(200, 0),
+	CHAN5G(202, 0), CHAN5G(204, 0),
+	CHAN5G(206, 0), CHAN5G(208, 0),
+	CHAN5G(210, 0), CHAN5G(212, 0),
+	CHAN5G(214, 0), CHAN5G(216, 0),
+	CHAN5G(218, 0), CHAN5G(220, 0),
+	CHAN5G(222, 0), CHAN5G(224, 0),
+	CHAN5G(226, 0), CHAN5G(228, 0)
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+	.channels = __wl_2ghz_channels,
+	.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+	.bitrates = wl_g_rates,
+	.n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+	.band = IEEE80211_BAND_5GHZ,
+	.channels = __wl_5ghz_a_channels,
+	.n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_n = {
+	.band = IEEE80211_BAND_5GHZ,
+	.channels = __wl_5ghz_n_channels,
+	.n_channels = ARRAY_SIZE(__wl_5ghz_n_channels),
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC
+};
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+	[NL80211_IFTYPE_ADHOC] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_AP] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_AP_VLAN] = {
+		/* copy AP */
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	}
+};
+
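+/* Translate wl_wsec_key fields between host and dongle byte order */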
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+/* For debug: dump the contents of the encoded WPS IE buffer */
+static void
+wl_dbg_dump_wps_ie(char *wps_ie)
+{
+	#define WPS_IE_FIXED_LEN 6
+	u16 len = (u16) wps_ie[TLV_LEN_OFF];
+	u8 *subel = wps_ie+  WPS_IE_FIXED_LEN;
+	u16 subelt_id;
+	u16 subelt_len;
+	u16 val;
+	u8 *valptr = (uint8*) &val;
+
+	WL_DBG(("wps_ie len=%d\n", len));
+
+	len -= 4;	/* for the WPS IE's OUI, oui_type fields */
+
+	while (len >= 4) {		/* must have attr id, attr len fields */
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_id = HTON16(val);
+
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_len = HTON16(val);
+
+		len -= 4;			/* for the attr id, attr len fields */
+		len -= subelt_len;	/* for the remaining fields in this attribute */
+		WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+			subel, subelt_id, subelt_len));
+
+		if (subelt_id == WPS_ID_VERSION) {
+			WL_DBG(("  attr WPS_ID_VERSION: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_REQ_TYPE) {
+			WL_DBG(("  attr WPS_ID_REQ_TYPE: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_DEVICE_NAME) {
+			char devname[100];
+			/* clamp the copy so an oversized attribute cannot
+			 * overflow the on-stack buffer
+			 */
+			u16 namelen = min_t(u16, subelt_len,
+				sizeof(devname) - 1);
+			memcpy(devname, subel, namelen);
+			devname[namelen] = '\0';
+			WL_DBG(("  attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+				devname, subelt_len));
+		} else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else {
+			WL_DBG(("  unknown attr 0x%x\n", subelt_id));
+		}
+
+		subel += subelt_len;
+	}
+}
+
+static struct net_device *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, char *name,
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 err;
+	s32 timeout = -1;
+	s32 wlif_type;
+	s32 index = 0;
+	s32 mode = 0;
+	chanspec_t chspec;
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct net_device *_ndev;
+	dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+
+	WL_DBG(("if name: %s, type: %d\n", name, type));
+	switch (type) {
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		WL_ERR(("Unsupported interface type\n"));
+		mode = WL_MODE_IBSS;
+		return NULL;
+	case NL80211_IFTYPE_MONITOR:
+		return  wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_STATION:
+		wlif_type = WL_P2P_IF_CLIENT;
+		mode = WL_MODE_BSS;
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_AP:
+		wlif_type = WL_P2P_IF_GO;
+		mode = WL_MODE_AP;
+		break;
+	default:
+		WL_ERR(("Unsupported interface type\n"));
+		return NULL;
+	}
+
+	if (!name) {
+		WL_ERR(("name is NULL\n"));
+		return NULL;
+	}
+
+	if (wl_get_p2p_status(wl, IF_DELETING) == 1) {
+		/* wait till IF_DEL is complete
+		 * release the lock for the unregister to proceed
+		 */
+		rtnl_unlock();
+		WL_INFO(("%s: Released the lock and wait till IF_DEL is complete\n", __func__));
+		timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+			(wl_get_p2p_status(wl, IF_DELETING) == FALSE),
+			msecs_to_jiffies(MAX_WAIT_TIME));
+
+		/* put back the rtnl_lock again */
+		rtnl_lock();
+		if (timeout > 0) {
+			WL_ERR(("IF_DEL completed successfully\n"));
+		} else {
+			WL_ERR(("%s: wait for IF_DEL timed out, return -EAGAIN\n",
+				__func__));
+			return ERR_PTR(-EAGAIN);
+		}
+	}
+	if (!p2p_on(wl) && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+		p2p_on(wl) = true;
+		wl_cfgp2p_init_discovery(wl);
+	}
+
+	memset(wl->p2p.vir_ifname, 0, IFNAMSIZ);
+	strncpy(wl->p2p.vir_ifname, name, IFNAMSIZ - 1);
+	wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p.dev_addr, &wl->p2p.int_addr);
+
+	/* Temporarily use channel 11; the GO channel may be changed later via the set_channel API */
+	chspec = wf_chspec_aton(WL_P2P_TEMP_CHAN);
+
+	/* For P2P mode, use P2P-specific driver features to create the
+	 * bss: "wl p2p_ifadd"
+	 */
+	wl_set_p2p_status(wl, IF_ADD);
+	err = wl_cfgp2p_ifadd(wl, &wl->p2p.int_addr, htod32(wlif_type), chspec);
+
+	if (unlikely(err))
+		return ERR_PTR(-ENOMEM);
+
+	timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+		(wl_get_p2p_status(wl, IF_ADD) == FALSE),
+		msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout > 0 && (!wl_get_p2p_status(wl, IF_ADD))) {
+
+		struct wireless_dev *vwdev;
+		vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+		if (unlikely(!vwdev)) {
+			WL_ERR(("Could not allocate wireless device\n"));
+			return ERR_PTR(-ENOMEM);
+		}
+		vwdev->wiphy = wl->wdev->wiphy;
+		WL_INFO((" virtual interface(%s) is created \n", wl->p2p.vir_ifname));
+		index = alloc_idx_vwdev(wl);
+		wl->vwdev[index] = vwdev;
+		vwdev->iftype =
+			(wlif_type == WL_P2P_IF_CLIENT) ? NL80211_IFTYPE_STATION
+			: NL80211_IFTYPE_AP;
+		_ndev =  wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+		_ndev->ieee80211_ptr = vwdev;
+		SET_NETDEV_DEV(_ndev, wiphy_dev(vwdev->wiphy));
+		vwdev->netdev = _ndev;
+		set_bit(WL_STATUS_READY, &wl->status);
+		wl->p2p.vif_created = TRUE;
+		set_mode_by_netdev(wl, _ndev, mode);
+		wl = wdev_to_wl(vwdev);
+		return _ndev;
+
+	} else {
+		wl_clr_p2p_status(wl, IF_ADD);
+		WL_ERR((" virtual interface(%s) is not created timeout=%d\n",
+			wl->p2p.vir_ifname, timeout));
+		memset(wl->p2p.vir_ifname, '\0', IFNAMSIZ);
+		wl->p2p.vif_created = FALSE;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct ether_addr p2p_mac;
+	struct wl_priv *wl = WL_PRIV_GET();
+	memcpy(p2p_mac.octet, wl->p2p.int_addr.octet, ETHER_ADDR_LEN);
+	if (wl->p2p.vif_created) {
+		if (test_bit(WL_STATUS_SCANNING, &wl->status)) {
+			wl_cfg80211_scan_abort(wl, dev);
+		}
+		wl_cfgp2p_ifdel(wl, &p2p_mac);
+		WL_ERR(("ifdel command sent to firmware; "
+			"returning without waiting for completion\n"));
+		wl_set_p2p_status(wl, IF_DELETING);
+
+	}
+
+	WL_DBG(("Done\n"));
+	return 0;
+}
+
+static s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 ap = 0;
+	s32 infra = 0;
+	s32 err = BCME_OK;
+	s32 timeout = -1;
+	s32 wlif_type;
+	s32 mode = 0;
+	chanspec_t chspec;
+	struct wl_priv *wl = WL_PRIV_GET();
+	CHECK_SYS_UP();
+	switch (type) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		ap = 1;
+		WL_ERR(("type (%d) : currently we do not support this type\n",
+			type));
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		ap = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+
+	if (ap && wl->p2p.vif_created) {
+		WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", wl->p2p.vif_created,
+			p2p_on(wl)));
+		chspec = wf_chspec_aton(WL_P2P_TEMP_CHAN);
+		wlif_type = ap ? WL_P2P_IF_GO : WL_P2P_IF_CLIENT;
+		WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n", ndev->name, ap, infra, type));
+		wl_set_p2p_status(wl, IF_CHANGING);
+		wl_clr_p2p_status(wl, IF_CHANGED);
+		err = wl_cfgp2p_ifchange(wl, &wl->p2p.int_addr, htod32(wlif_type), chspec);
+		timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+			(wl_get_p2p_status(wl, IF_CHANGED) == TRUE),
+			msecs_to_jiffies(MAX_WAIT_TIME));
+		set_mode_by_netdev(wl, ndev, mode);
+		wl_clr_p2p_status(wl, IF_CHANGING);
+		wl_clr_p2p_status(wl, IF_CHANGED);
+	}
+
+	ndev->ieee80211_ptr->iftype = type;
+	return 0;
+}
+
+s32
+wl_cfg80211_notify_ifadd(struct net_device *net)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	s32 ret = BCME_OK;
+	if (!net) {
+		WL_ERR(("net is NULL\n"));
+		return 0;
+	}
+
+	WL_DBG(("IF_ADD event called from dongle, old interface name: %s, new name: %s\n",
+		net->name, wl->p2p.vir_ifname));
+	/* Assign the net device to CONNECT BSSCFG */
+	strncpy(net->name, wl->p2p.vir_ifname, IFNAMSIZ - 1);
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = net;
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = P2PAPI_BSSCFG_CONNECTION;
+	wl_clr_p2p_status(wl, IF_ADD);
+	wake_up_interruptible(&wl->dongle_event_wait);
+	return ret;
+}
+
+s32
+wl_cfg80211_notify_ifdel(struct net_device *net)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+
+	if (!net) {
+		WL_DBG(("net is NULL\n"));
+		return 0;
+	}
+
+	WL_DBG(("IF_DEL event called from dongle, _net name: %s, vif name: %s\n",
+		net->name, wl->p2p.vir_ifname));
+	if (wl->p2p.vif_created) {
+		s32 index = 0;
+		memset(wl->p2p.vir_ifname, '\0', IFNAMSIZ);
+		index = wl_cfgp2p_find_idx(wl, net);
+		wl_to_p2p_bss_ndev(wl, index) = NULL;
+		wl_to_p2p_bss_bssidx(wl, index) = 0;
+		wl->p2p.vif_created = FALSE;
+		set_mode_by_netdev(wl, net, -1);
+		wl_cfgp2p_clear_management_ie(wl,
+			index);
+		index = get_idx_vwdev_by_netdev(wl, net);
+		WL_DBG(("index : %d\n", index));
+		if (index >= 0)
+			free_vwdev_by_index(wl, index);
+		/* Alternatively, IF_ADD could be made to wait until IF_DEL completes */
+		wl_clr_p2p_status(wl, IF_DELETING);
+		WL_ERR(("Cleared IF_DELETING status bit\n"));
+		wake_up_interruptible(&wl->dongle_event_wait);
+		WL_ERR(("Cleared IF_DELETING status bit DONE WAKEUP Interruptible\n"));
+	}
+	return 0;
+}
+
+s32
+wl_cfg80211_is_progress_ifadd(void)
+{
+	s32 is_progress = 0;
+	struct wl_priv *wl = WL_PRIV_GET();
+	if (wl_get_p2p_status(wl, IF_ADD))
+		is_progress = 1;
+	return is_progress;
+}
+
+s32
+wl_cfg80211_is_progress_ifchange(void)
+{
+	s32 is_progress = 0;
+	struct wl_priv *wl = WL_PRIV_GET();
+	if (wl_get_p2p_status(wl, IF_CHANGING))
+		is_progress = 1;
+	return is_progress;
+}
+
+
+s32
+wl_cfg80211_notify_ifchange(void)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	if (wl_get_p2p_status(wl, IF_CHANGING)) {
+		wl_set_p2p_status(wl, IF_CHANGED);
+		wake_up_interruptible(&wl->dongle_event_wait);
+	}
+	return 0;
+}
+
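+/* Fill in default (broadcast, any-BSS-type) iscan parameters */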
+static void wl_iscan_prep(struct wl_scan_params *params, struct wlc_ssid *ssid)
+{
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+}
+
+static s32
+wl_run_iscan(struct wl_iscan_ctrl *iscan, struct wlc_ssid *ssid, u16 action)
+{
+	s32 params_size =
+		(WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
+	struct wl_iscan_params *params;
+	s32 err = 0;
+
+	if (ssid && ssid->SSID_len)
+		params_size += sizeof(struct wlc_ssid);
+	params = kzalloc(params_size, GFP_KERNEL);
+	if (unlikely(!params))
+		return -ENOMEM;
+	BUG_ON(unlikely(params_size >= WLC_IOCTL_SMLEN));
+
+	wl_iscan_prep(&params->params, ssid);
+
+	params->version = htod32(ISCAN_REQ_VERSION);
+	params->action = htod16(action);
+	params->scan_duration = htod16(0);
+
+	/* params_size += offsetof(wl_iscan_params_t, params); */
+	err = wldev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+		iscan->ioctl_buf, WLC_IOCTL_SMLEN);
+	if (unlikely(err)) {
+		if (err == -EBUSY) {
+			WL_INFO(("system busy : iscan canceled\n"));
+		} else {
+			WL_ERR(("error (%d)\n", err));
+		}
+	}
+	kfree(params);
+	return err;
+}
+
+static s32 wl_do_iscan(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	struct wlc_ssid ssid;
+	s32 passive_scan;
+	s32 err = 0;
+
+	/* Broadcast scan by default */
+	memset(&ssid, 0, sizeof(ssid));
+
+	iscan->state = WL_ISCAN_STATE_SCANING;
+
+	passive_scan = wl->active_scan ? 0 : 1;
+	err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+		&passive_scan, sizeof(passive_scan), FALSE);
+	if (unlikely(err)) {
+		WL_DBG(("error (%d)\n", err));
+		return err;
+	}
+	wl->iscan_kickstart = true;
+	wl_run_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+	iscan->timer_on = 1;
+
+	return err;
+}
+
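+/*
+ * Issue an escan to the dongle: a legacy scan on the primary interface,
+ * or a P2P scan/search on the discovery bsscfg when p2p is active.
+ */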
+static s32
+wl_run_escan(struct wl_priv *wl, struct net_device *ndev, wlc_ssid_t *ssid, uint16 action)
+{
+	s32 err = BCME_OK;
+	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+	wl_escan_params_t *params;
+	struct cfg80211_scan_request *scan_request = wl->scan_request;
+	u32 num_chans = 0;
+	s32 search_state = WL_P2P_DISC_ST_SCAN;
+	u32 i;
+	u16 *default_chan_list = NULL;
+	WL_DBG(("Enter \n"));
+
+
+	if ((ndev == wl_to_prmry_ndev(wl)) &&
+		!p2p_scan(wl)) {
+		/* LEGACY SCAN TRIGGER */
+		WL_DBG(("LEGACY SCAN START\n"));
+		if (ssid && ssid->SSID_len) {
+			params_size += sizeof(wlc_ssid_t);
+		}
+		params = (wl_escan_params_t *) kmalloc(params_size, GFP_KERNEL);
+
+		if (params == NULL)
+			return -ENOMEM;
+
+		memset(params, 0, params_size);
+		memcpy(&params->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+		params->params.bss_type = DOT11_BSSTYPE_ANY;
+		params->params.scan_type = 0;
+		params->params.nprobes = htod32(-1);
+		params->params.active_time = htod32(-1);
+		params->params.passive_time = htod32(-1);
+		params->params.home_time = htod32(-1);
+		params->params.channel_num = 0;
+		if (ssid && ssid->SSID_len) {
+			memcpy(params->params.ssid.SSID, ssid->SSID, ssid->SSID_len);
+			params->params.ssid.SSID_len = htod32(ssid->SSID_len);
+		}
+		params->version = htod32(ESCAN_REQ_VERSION);
+		params->action =  htod16(action);
+		params->sync_id = htod16(0x1234);
+		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+			wl->escan_ioctl_buf, WLC_IOCTL_MEDLEN);
+		kfree(params);
+	} else if (p2p_on(wl) && p2p_scan(wl)) {
+		/* P2P SCAN TRIGGER */
+		if (scan_request && scan_request->n_channels) {
+			num_chans = scan_request->n_channels;
+			WL_INFO((" chann number : %d\n", num_chans));
+			default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+				GFP_KERNEL);
+			if (default_chan_list == NULL) {
+				WL_ERR(("channel list allocation failed \n"));
+				err = -ENOMEM;
+				goto exit;
+			}
+			for (i = 0; i < num_chans; i++) {
+				default_chan_list[i] =
+					ieee80211_frequency_to_channel(
+						scan_request->channels[i]->center_freq);
+			}
+			if (num_chans == 3 && (
+						(default_chan_list[0] == SOCIAL_CHAN_1) &&
+						(default_chan_list[1] == SOCIAL_CHAN_2) &&
+						(default_chan_list[2] == SOCIAL_CHAN_3))) {
+				/* SOCIAL CHANNELS 1, 6, 11 */
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				WL_INFO(("P2P SEARCH PHASE START \n"));
+			} else {
+				WL_INFO(("P2P SCAN STATE START \n"));
+			}
+
+		}
+		err = wl_cfgp2p_escan(wl, ndev, wl->active_scan, num_chans, default_chan_list,
+			search_state, action,
+			wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+		kfree(default_chan_list);
+	}
+exit:
+	return err;
+}
+
+
+static s32
+wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev, wlc_ssid_t *ssid)
+{
+	s32 err = BCME_OK;
+	s32 passive_scan;
+	wl_scan_results_t *results;
+	WL_DBG(("Enter \n"));
+
+	wl->escan_info.wiphy = wiphy;
+	wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+	passive_scan = wl->active_scan ? 0 : 1;
+	err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+		&passive_scan, sizeof(passive_scan), FALSE);
+	if (unlikely(err)) {
+		WL_DBG(("error (%d)\n", err));
+		return err;
+	}
+	results = (wl_scan_results_t *) wl->escan_info.escan_buf;
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+	wl_run_escan(wl, ndev, ssid, WL_SCAN_ACTION_START);
+	return err;
+}
+
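+/*
+ * Common scan entry point: dispatches to iscan, escan or a one-shot
+ * WLC_SCAN ioctl depending on the driver configuration.
+ */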
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct cfg80211_ssid *ssids;
+	struct wl_scan_req *sr = wl_to_sr(wl);
+	wlc_ssid_t ssid_info;
+	s32 passive_scan;
+	bool iscan_req;
+	bool escan_req;
+	bool spec_scan;
+	s32 err = 0;
+
+	if (unlikely(test_bit(WL_STATUS_SCANNING, &wl->status))) {
+		WL_ERR(("Scanning already : status (%d)\n", (int)wl->status));
+		return -EAGAIN;
+	}
+	if (unlikely(test_bit(WL_STATUS_SCAN_ABORTING, &wl->status))) {
+		WL_ERR(("Scanning being aborted : status (%d)\n",
+			(int)wl->status));
+		return -EAGAIN;
+	}
+
+	WL_DBG(("wiphy (%p)\n", wiphy));
+
+	iscan_req = false;
+	escan_req = false;
+	spec_scan = false;
+	if (request) {		/* scan bss */
+		ssids = request->ssids;
+		if (wl->iscan_on && (!ssids || !ssids->ssid_len)) {
+			iscan_req = true;
+		} else if (wl->escan_on) {
+			escan_req = true;
+			if (ssids && ssids->ssid_len && IS_P2P_SSID(ssids->ssid)) {
+				/* p2p scan trigger */
+				if (p2p_on(wl) == false) {
+					/* p2p on at the first time */
+					p2p_on(wl) = true;
+					wl_cfgp2p_set_firm_p2p(wl);
+				}
+
+				p2p_scan(wl) = true;
+
+			} else {
+				/* legacy scan trigger: disable p2p discovery
+				 * if it is currently enabled
+				 */
+				p2p_scan(wl) = false;
+				/* If the netdevice is not the primary one and
+				 * p2p is on, do the p2p scan using
+				 * P2PAPI_BSSCFG_DEVICE.
+				 */
+				if (p2p_on(wl) && (ndev != wl_to_prmry_ndev(wl)))
+					p2p_scan(wl) = true;
+
+				if (p2p_scan(wl) == false) {
+					if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+						err = wl_cfgp2p_discover_enable_search(wl, FALSE);
+						if (unlikely(err)) {
+							goto scan_out;
+						}
+
+					}
+				}
+			}
+		}
+	} else {		/* scan in ibss */
+		/* we don't do iscan in ibss */
+		ssids = this_ssid;
+	}
+	wl->scan_request = request;
+	set_bit(WL_STATUS_SCANNING, &wl->status);
+	if (iscan_req) {
+		err = wl_do_iscan(wl);
+		if (likely(!err))
+			return err;
+		else
+			goto scan_out;
+	} else if (escan_req) {
+		WL_DBG(("ssid \"%s\", ssid_len (%d)\n",
+			ssids->ssid, ssids->ssid_len));
+
+		memcpy(ssid_info.SSID, ssids->ssid, ssids->ssid_len);
+		ssid_info.SSID_len = ssids->ssid_len;
+
+		if (p2p_on(wl) && p2p_scan(wl)) {
+
+			err = wl_cfgp2p_enable_discovery(wl, ndev, request->ie, request->ie_len);
+
+			if (unlikely(err)) {
+				goto scan_out;
+			}
+		}
+		err = wl_do_escan(wl, wiphy, ndev, &ssid_info);
+		if (likely(!err))
+			return err;
+		else
+			goto scan_out;
+	} else {
+		memset(&sr->ssid, 0, sizeof(sr->ssid));
+		sr->ssid.SSID_len =
+			min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
+		if (sr->ssid.SSID_len) {
+			memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
+			sr->ssid.SSID_len = htod32(sr->ssid.SSID_len);
+			WL_DBG(("Specific scan ssid=\"%s\" len=%d\n",
+				sr->ssid.SSID, sr->ssid.SSID_len));
+			spec_scan = true;
+		} else {
+			WL_DBG(("Broadcast scan\n"));
+		}
+		WL_DBG(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len));
+		passive_scan = wl->active_scan ? 0 : 1;
+		err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+			&passive_scan, sizeof(passive_scan), FALSE);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_SET_PASSIVE_SCAN error (%d)\n", err));
+			goto scan_out;
+		}
+		err = wldev_ioctl(ndev, WLC_SCAN, &sr->ssid,
+			sizeof(sr->ssid), FALSE);
+		if (err) {
+			if (err == -EBUSY) {
+				WL_INFO(("system busy : scan for \"%s\" "
+					"canceled\n", sr->ssid.SSID));
+			} else {
+				WL_ERR(("WLC_SCAN error (%d)\n", err));
+			}
+			goto scan_out;
+		}
+	}
+
+	return 0;
+
+scan_out:
+	clear_bit(WL_STATUS_SCANNING, &wl->status);
+	wl->scan_request = NULL;
+	return err;
+}
+
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+{
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	CHECK_SYS_UP();
+	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("scan error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
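+/* Set a named integer iovar on the dongle via WLC_SET_VAR */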
+static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val)
+{
+	s8 buf[WLC_IOCTL_SMLEN];
+	u32 len;
+	s32 err = 0;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	BUG_ON(unlikely(!len));
+
+	err = wldev_ioctl(dev, WLC_SET_VAR, buf, len, FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+	}
+
+	return err;
+}
+
+static s32
+wl_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
+{
+	union {
+		s8 buf[WLC_IOCTL_SMLEN];
+		s32 val;
+	} var;
+	u32 len;
+	u32 data_null = 0;
+	s32 err = 0;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0,
+		(char *)(&var), sizeof(var.buf));
+	BUG_ON(unlikely(!len));
+	err = wldev_ioctl(dev, WLC_GET_VAR, &var, len, FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+	}
+	*retval = dtoh32(var.val);
+
+	return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+	s32 err = 0;
+
+	err = wl_dev_intvar_set(dev, "rtsthresh", rts_threshold);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+	s32 err = 0;
+
+	err = wl_dev_intvar_set(dev, "fragthresh", frag_threshold);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+	s32 err = 0;
+	u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+	retry = htod32(retry);
+	err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+	struct wl_priv *wl = wiphy_to_wl(wiphy);
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	s32 err = 0;
+
+	CHECK_SYS_UP();
+	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+		(wl->conf->rts_threshold != wiphy->rts_threshold)) {
+		wl->conf->rts_threshold = wiphy->rts_threshold;
+		err = wl_set_rts(ndev, wl->conf->rts_threshold);
+		if (err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+		(wl->conf->frag_threshold != wiphy->frag_threshold)) {
+		wl->conf->frag_threshold = wiphy->frag_threshold;
+		err = wl_set_frag(ndev, wl->conf->frag_threshold);
+		if (err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG &&
+		(wl->conf->retry_long != wiphy->retry_long)) {
+		wl->conf->retry_long = wiphy->retry_long;
+		err = wl_set_retry(ndev, wl->conf->retry_long, true);
+		if (err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_SHORT &&
+		(wl->conf->retry_short != wiphy->retry_short)) {
+		wl->conf->retry_short = wiphy->retry_short;
+		err = wl_set_retry(ndev, wl->conf->retry_short, false);
+		if (err)
+			return err;
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct cfg80211_bss *bss;
+	struct ieee80211_channel *chan;
+	struct wl_join_params join_params;
+	struct cfg80211_ssid ssid;
+	s32 scan_retry = 0;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	CHECK_SYS_UP();
+	if (params->bssid) {
+		WL_ERR(("Invalid bssid\n"));
+		return -EOPNOTSUPP;
+	}
+	bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+	if (!bss) {
+		memcpy(ssid.ssid, params->ssid, params->ssid_len);
+		ssid.ssid_len = params->ssid_len;
+		do {
+			if (unlikely
+				(__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+				 -EBUSY)) {
+				wl_delay(150);
+			} else {
+				break;
+			}
+		} while (++scan_retry < WL_SCAN_RETRY_MAX);
+		/* to allow scan_inform to propagate to cfg80211 plane */
+		rtnl_unlock();
+
+		/* wait 4 seconds until the scan is done */
+		schedule_timeout_interruptible(4 * HZ);
+		rtnl_lock();
+		bss = cfg80211_get_ibss(wiphy, NULL,
+			params->ssid, params->ssid_len);
+	}
+	if (bss) {
+		wl->ibss_starter = false;
+		WL_DBG(("Found IBSS\n"));
+	} else {
+		wl->ibss_starter = true;
+	}
+	chan = params->channel;
+	if (chan)
+		wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+	/*
+	 * Join with specific BSSID and cached SSID
+	 * If SSID is zero join based on BSSID only
+	 */
+	memset(&join_params, 0, sizeof(join_params));
+	memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
+		params->ssid_len);
+	join_params.ssid.SSID_len = htod32(params->ssid_len);
+	if (params->bssid)
+		memcpy(&join_params.params.bssid, params->bssid,
+			ETHER_ADDR_LEN);
+	else
+		memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+		sizeof(join_params), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	s32 err = 0;
+
+	CHECK_SYS_UP();
+	wl_link_down(wl);
+
+	return err;
+}
+
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+		val = WPA_AUTH_PSK; /* | WPA_AUTH_UNSPECIFIED; */
+	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+		val = WPA2_AUTH_PSK; /* | WPA2_AUTH_UNSPECIFIED ; */
+	else
+		val = WPA_AUTH_DISABLED;
+
+	if (is_wps_conn(sme))
+		val = WPA_AUTH_DISABLED;
+
+	WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wpa_auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(wl, WL_PROF_SEC);
+	sec->wpa_versions = sme->crypto.wpa_versions;
+	return err;
+}
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+	switch (sme->auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+		val = 0;
+		WL_DBG(("open system\n"));
+		break;
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		val = 1;
+		WL_DBG(("shared key\n"));
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		val = 2;
+		WL_DBG(("automatic\n"));
+		break;
+	case NL80211_AUTHTYPE_NETWORK_EAP:
+		WL_DBG(("network eap\n"));
+	default:
+		val = 2;
+		WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+		break;
+	}
+
+	err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(wl, WL_PROF_SEC);
+	sec->auth_type = sme->auth_type;
+	return err;
+}
+
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct wl_security *sec;
+	s32 pval = 0;
+	s32 gval = 0;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	if (sme->crypto.n_ciphers_pairwise) {
+		switch (sme->crypto.ciphers_pairwise[0]) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			pval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			pval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			pval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("invalid cipher pairwise (%d)\n",
+				sme->crypto.ciphers_pairwise[0]));
+			return -EINVAL;
+		}
+	}
+	if (sme->crypto.cipher_group) {
+		switch (sme->crypto.cipher_group) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			gval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			gval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			gval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("invalid cipher group (%d)\n",
+				sme->crypto.cipher_group));
+			return -EINVAL;
+		}
+	}
+
+	WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+	if (is_wps_conn(sme)) {
+		err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+	} else {
+		err = wldev_iovar_setint_bsscfg(dev, "wsec", pval | gval, bssidx);
+	}
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	sec = wl_read_prof(wl, WL_PROF_SEC);
+	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+	sec->cipher_group = sme->crypto.cipher_group;
+
+	return err;
+}
+
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	if (sme->crypto.n_akm_suites) {
+		err = wl_dev_intvar_get(dev, "wpa_auth", &val);
+		if (unlikely(err)) {
+			WL_ERR(("could not get wpa_auth (%d)\n", err));
+			return err;
+		}
+		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		}
+
+		WL_DBG(("setting wpa_auth to %d\n", val));
+
+		err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+		if (unlikely(err)) {
+			WL_ERR(("could not set wpa_auth (%d)\n", err));
+			return err;
+		}
+	}
+	sec = wl_read_prof(wl, WL_PROF_SEC);
+	sec->wpa_auth = sme->crypto.akm_suites[0];
+
+	return err;
+}
+
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct wl_security *sec;
+	struct wl_wsec_key key;
+	s32 val;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	WL_DBG(("key len (%d)\n", sme->key_len));
+	if (sme->key_len) {
+		sec = wl_read_prof(wl, WL_PROF_SEC);
+		WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+			sec->wpa_versions, sec->cipher_pairwise));
+		if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+			NL80211_WPA_VERSION_2)) &&
+			(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+			WLAN_CIPHER_SUITE_WEP104))) {
+			memset(&key, 0, sizeof(key));
+			key.len = (u32) sme->key_len;
+			key.index = (u32) sme->key_idx;
+			if (unlikely(key.len > sizeof(key.data))) {
+				WL_ERR(("Too long key length (%u)\n", key.len));
+				return -EINVAL;
+			}
+			memcpy(key.data, sme->key, key.len);
+			key.flags = WL_PRIMARY_KEY;
+			switch (sec->cipher_pairwise) {
+			case WLAN_CIPHER_SUITE_WEP40:
+				key.algo = CRYPTO_ALGO_WEP1;
+				break;
+			case WLAN_CIPHER_SUITE_WEP104:
+				key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			default:
+				WL_ERR(("Invalid algorithm (%d)\n",
+					sme->crypto.ciphers_pairwise[0]));
+				return -EINVAL;
+			}
+			/* Set the new key/index */
+			WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+				key.len, key.index, key.algo));
+			WL_DBG(("key \"%s\"\n", key.data));
+			swap_key_from_BE(&key);
+			err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+				ioctlbuf, sizeof(ioctlbuf), bssidx);
+			if (unlikely(err)) {
+				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+				return err;
+			}
+			if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
+				WL_DBG(("set auth_type to shared key\n"));
+				val = 1;	/* shared key */
+				err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+				if (unlikely(err)) {
+					WL_ERR(("set auth failed (%d)\n", err));
+					return err;
+				}
+			}
+		}
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct ieee80211_channel *chan = sme->channel;
+	struct wl_join_params join_params;
+	size_t join_params_size;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	CHECK_SYS_UP();
+	if (IS_P2P_SSID(sme->ssid) && (dev != wl_to_prmry_ndev(wl))) {
+		/* we only allow to connect using virtual interface in case of P2P */
+		if (p2p_on(wl) && is_wps_conn(sme)) {
+			WL_DBG(("p2p index : %d\n", wl_cfgp2p_find_idx(wl, dev)));
+			/* Have to apply WPS IE + P2P IE in assoc req frame */
+			wl_cfgp2p_set_managment_ie(wl, dev,
+				wl_cfgp2p_find_idx(wl, dev), VNDR_IE_PRBREQ_FLAG,
+				wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie,
+				wl_to_p2p_bss_saved_ie(wl,
+				P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len);
+			wl_cfgp2p_set_managment_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+				VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+		} else if (p2p_on(wl) && (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
+			/* This is the connect request issued after WPS is done
+			 * (credentials exchanged), currently identified by
+			 * WPA_VERSION_2. Update the previously set IEs with the
+			 * ones newly received from the supplicant; this removes
+			 * the WPS IE from the assoc request.
+			 */
+			wl_cfgp2p_set_managment_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+				VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+		}
+
+	} else {
+		WL_ERR(("No P2PIE in beacon \n"));
+	}
+
+	if (unlikely(!sme->ssid)) {
+		WL_ERR(("Invalid ssid\n"));
+		return -EOPNOTSUPP;
+	}
+	if (chan) {
+		wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+		WL_DBG(("channel (%d), center_req (%d)\n", wl->channel,
+			chan->center_freq));
+	}
+	WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+	err = wl_set_wpa_version(dev, sme);
+	if (unlikely(err))
+		return err;
+
+	err = wl_set_auth_type(dev, sme);
+	if (unlikely(err))
+		return err;
+
+	err = wl_set_set_cipher(dev, sme);
+	if (unlikely(err))
+		return err;
+
+	err = wl_set_key_mgmt(dev, sme);
+	if (unlikely(err))
+		return err;
+
+	err = wl_set_set_sharedkey(dev, sme);
+	if (unlikely(err))
+		return err;
+
+	wl_update_prof(wl, NULL, sme->bssid, WL_PROF_BSSID);
+	/*
+	 *  Join with specific BSSID and cached SSID
+	 *  If SSID is zero join based on BSSID only
+	 */
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+	wl_update_prof(wl, NULL, &join_params.ssid, WL_PROF_SSID);
+	memcpy(&join_params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+
+	wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
+	WL_DBG(("join_param_size %d\n", join_params_size));
+
+	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_ERR(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+			join_params.ssid.SSID_len));
+	}
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	set_bit(WL_STATUS_CONNECTING, &wl->status);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	scb_val_t scbval;
+	bool act = false;
+	s32 err = 0;
+
+	WL_ERR(("Reason %d\n", reason_code));
+	CHECK_SYS_UP();
+	act = *(bool *) wl_read_prof(wl, WL_PROF_ACT);
+	if (likely(act)) {
+		scbval.val = reason_code;
+		memcpy(&scbval.ea, &wl->bssid, ETHER_ADDR_LEN);
+		scbval.val = htod32(scbval.val);
+		err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+			sizeof(scb_val_t), FALSE);
+		if (unlikely(err)) {
+			WL_ERR(("error (%d)\n", err));
+			return err;
+		}
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm)
+{
+
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	u16 txpwrmw;
+	s32 err = 0;
+	s32 disable = 0;
+
+	CHECK_SYS_UP();
+	switch (type) {
+	case NL80211_TX_POWER_AUTOMATIC:
+		break;
+	case NL80211_TX_POWER_LIMITED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_LIMITED - dbm is negative\n"));
+			return -EINVAL;
+		}
+		break;
+	case NL80211_TX_POWER_FIXED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_FIXED - dbm is negative\n"));
+			return -EINVAL;
+		}
+		break;
+	}
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = WL_RADIO_SW_DISABLE << 16;
+	disable = htod32(disable);
+	err = wldev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+		return err;
+	}
+
+	if (dbm > 0xffff)
+		txpwrmw = 0xffff;
+	else
+		txpwrmw = (u16) dbm;
+	err = wl_dev_intvar_set(ndev, "qtxpower",
+		(s32) (bcm_mw_to_qdbm(txpwrmw)));
+	if (unlikely(err)) {
+		WL_ERR(("qtxpower error (%d)\n", err));
+		return err;
+	}
+	wl->conf->tx_power = dbm;
+
+	return err;
+}
+
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	s32 txpwrdbm;
+	u8 result;
+	s32 err = 0;
+
+	CHECK_SYS_UP();
+	err = wl_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
+	*dbm = (s32) bcm_qdbm_to_mw(result);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	u32 index;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	CHECK_SYS_UP();
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	if (wsec & WEP_ENABLED) {
+		/* Just select a new current key */
+		index = (u32) key_idx;
+		index = htod32(index);
+		err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+			sizeof(index), FALSE);
+		if (unlikely(err)) {
+			WL_ERR(("error (%d)\n", err));
+		}
+	}
+	return err;
+}
+
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct wl_wsec_key key;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	memset(&key, 0, sizeof(key));
+	key.index = (u32) key_idx;
+
+	if (!ETHER_ISMULTI(mac_addr))
+		memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+	key.len = (u32) params->key_len;
+
+	/* check for key index change */
+	if (key.len == 0) {
+		/* key delete */
+		swap_key_from_BE(&key);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			ioctlbuf, sizeof(ioctlbuf), bssidx);
+		if (unlikely(err)) {
+			WL_ERR(("key delete error (%d)\n", err));
+			return err;
+		}
+	} else {
+		if (key.len > sizeof(key.data)) {
+			WL_ERR(("Invalid key length (%d)\n", key.len));
+			return -EINVAL;
+		}
+
+		WL_DBG(("Setting the key index %d\n", key.index));
+		memcpy(key.data, params->key, key.len);
+
+		if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
+			u8 keybuf[8];
+			memcpy(keybuf, &key.data[24], sizeof(keybuf));
+			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+			memcpy(&key.data[16], keybuf, sizeof(keybuf));
+		}
+
+		/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+		if (params->seq && params->seq_len == 6) {
+			/* rx iv */
+			u8 *ivptr;
+			ivptr = (u8 *) params->seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = true;
+		}
+
+		switch (params->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+			key.algo = CRYPTO_ALGO_WEP1;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			break;
+		case WLAN_CIPHER_SUITE_WEP104:
+			key.algo = CRYPTO_ALGO_WEP128;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			key.algo = CRYPTO_ALGO_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+			break;
+		default:
+			WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+			return -EINVAL;
+		}
+		swap_key_from_BE(&key);
+#ifdef CONFIG_WIRELESS_EXT
+		dhd_wait_pend8021x(dev);
+#endif
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			ioctlbuf, sizeof(ioctlbuf), bssidx);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+			return err;
+		}
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params)
+{
+	struct wl_wsec_key key;
+	s32 val = 0;
+	s32 wsec = 0;
+	s32 err = 0;
+	uint8 keybuf[8];
+	s32 bssidx = 0;
+	struct wl_priv *wl = WL_PRIV_GET();
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	CHECK_SYS_UP();
+
+	bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	if (mac_addr) {
+		wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+		goto exit;
+	}
+	memset(&key, 0, sizeof(key));
+
+	key.len = (u32) params->key_len;
+	key.index = (u32) key_idx;
+
+	if (unlikely(key.len > sizeof(key.data))) {
+		WL_ERR(("Too long key length (%u)\n", key.len));
+		return -EINVAL;
+	}
+	memcpy(key.data, params->key, key.len);
+
+	key.flags = WL_PRIMARY_KEY;
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key.algo = CRYPTO_ALGO_WEP1;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key.algo = CRYPTO_ALGO_WEP128;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key.algo = CRYPTO_ALGO_TKIP;
+		val = TKIP_ENABLED;
+		/* wpa_supplicant switches the third and fourth quarters of the TKIP key, linm */
+		bcopy(&key.data[24], keybuf, sizeof(keybuf));
+		bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+		bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+		break;
+	default:
+		WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+		return -EINVAL;
+	}
+
+	/* Set the new key/index */
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), ioctlbuf,
+		sizeof(ioctlbuf), bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		return err;
+	}
+
+exit:
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("get wsec error (%d)\n", err));
+		return err;
+	}
+
+	wsec |= val;
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wsec error (%d)\n", err));
+		return err;
+	}
+
+#ifdef NOT_YET
+	/* TODO: Removed in P2P, check later --lm */
+	val = 1;		/* assume shared key. otherwise 0 */
+	val = htod32(val);
+	err = wldev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_AUTH error (%d)\n", err));
+		return err;
+	}
+#endif
+	return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+	struct wl_wsec_key key;
+	struct wl_priv *wl = WL_PRIV_GET();
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	WL_DBG(("Enter\n"));
+	CHECK_SYS_UP();
+	memset(&key, 0, sizeof(key));
+
+	key.index = (u32) key_idx;
+	key.flags = WL_PRIMARY_KEY;
+	key.algo = CRYPTO_ALGO_OFF;
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	/* Set the new key/index */
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+		ioctlbuf, sizeof(ioctlbuf), bssidx);
+	if (unlikely(err)) {
+		if (err == -EINVAL) {
+			if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+				/* we ignore this key index in this case */
+				WL_DBG(("invalid key index (%d)\n", key_idx));
+			}
+		} else {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		}
+		return err;
+	}
+
+#ifdef NOT_YET
+	/* TODO: Removed in P2P twig, check later --lin */
+	val = 0;		/* assume open key. otherwise 1 */
+	val = htod32(val);
+	err = wldev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_AUTH error (%d)\n", err));
+		return err;
+	}
+#endif
+	return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+	void (*callback) (void *cookie, struct key_params * params))
+{
+	struct key_params params;
+	struct wl_wsec_key key;
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct wl_security *sec;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	CHECK_SYS_UP();
+	memset(&key, 0, sizeof(key));
+	key.index = key_idx;
+	swap_key_to_BE(&key);
+	memset(&params, 0, sizeof(params));
+	params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+	memcpy(params.key, key.data, params.key_len);
+
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	switch (wsec) {
+	case WEP_ENABLED:
+		sec = wl_read_prof(wl, WL_PROF_SEC);
+		if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+			params.cipher = WLAN_CIPHER_SUITE_WEP40;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+			params.cipher = WLAN_CIPHER_SUITE_WEP104;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		}
+		break;
+	case TKIP_ENABLED:
+		params.cipher = WLAN_CIPHER_SUITE_TKIP;
+		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		break;
+	case AES_ENABLED:
+		params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		break;
+	default:
+		WL_ERR(("Invalid algo (0x%x)\n", wsec));
+		return -EINVAL;
+	}
+
+	callback(cookie, &params);
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev, u8 key_idx)
+{
+	WL_INFO(("Not supported\n"));
+	CHECK_SYS_UP();
+	return -EOPNOTSUPP;
+}
+
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	u8 *mac, struct station_info *sinfo)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	scb_val_t scb_val;
+	int rssi;
+	s32 rate;
+	s32 err = 0;
+
+	CHECK_SYS_UP();
+	if (unlikely
+		(memcmp(mac, wl_read_prof(wl, WL_PROF_BSSID), ETHER_ADDR_LEN))) {
+		WL_ERR(("Wrong Mac address\n"));
+		return -ENOENT;
+	}
+
+	/* Report the current tx rate */
+	err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), FALSE);
+	if (err) {
+		WL_ERR(("Could not get rate (%d)\n", err));
+	} else {
+		rate = dtoh32(rate);
+		sinfo->filled |= STATION_INFO_TX_BITRATE;
+		sinfo->txrate.legacy = rate * 5;
+		WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+	}
+
+	if (test_bit(WL_STATUS_CONNECTED, &wl->status)) {
+		memset(&scb_val, 0, sizeof(scb_val));
+		scb_val.val = 0;
+		err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val,
+			sizeof(scb_val_t), FALSE);
+		if (unlikely(err)) {
+			WL_ERR(("Could not get rssi (%d)\n", err));
+			return err;
+		}
+		rssi = dtoh32(scb_val.val);
+		sinfo->filled |= STATION_INFO_SIGNAL;
+		sinfo->signal = rssi;
+		WL_DBG(("RSSI %d dBm\n", rssi));
+	}
+
+#if defined(ANDROID_WIRELESS_PATCH)
+	err = wldev_ioctl(dev, WLC_GET_RATE, &sinfo->link_speed, sizeof(sinfo->link_speed), FALSE);
+	sinfo->link_speed = sinfo->link_speed / 2; /* Convert internal 500Kbps to Mbps */
+	if (!err)
+		sinfo->filled |= STATION_LINK_SPEED;
+	else
+		WL_ERR(("WLC_GET_RATE failed\n"));
+#endif
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	bool enabled, s32 timeout)
+{
+	s32 pm;
+	s32 err = 0;
+
+	CHECK_SYS_UP();
+	pm = enabled ? PM_FAST : PM_OFF;
+	pm = htod32(pm);
+	WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled")));
+	err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), FALSE);
+	if (unlikely(err)) {
+		if (err == -ENODEV)
+			WL_DBG(("net_device is not ready yet\n"));
+		else
+			WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
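+/* Return the 1-based position of the most significant set bit in bit16,
+ * or 0 if no bit is set.
+ */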
+static __used u32 wl_find_msb(u16 bit16)
+{
+	u32 ret = 0;
+
+	if (bit16 & 0xff00) {
+		ret += 8;
+		bit16 >>= 8;
+	}
+
+	if (bit16 & 0xf0) {
+		ret += 4;
+		bit16 >>= 4;
+	}
+
+	if (bit16 & 0xc) {
+		ret += 2;
+		bit16 >>= 2;
+	}
+
+	if (bit16 & 2)
+		ret += bit16 & 2;
+	else if (bit16)
+		ret += bit16;
+
+	return ret;
+}
+
+static s32
+wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
+	const u8 *addr,
+	const struct cfg80211_bitrate_mask *mask)
+{
+	struct wl_rateset rateset;
+	s32 rate;
+	s32 val;
+	s32 err_bg;
+	s32 err_a;
+	u32 legacy;
+	s32 err = 0;
+
+	CHECK_SYS_UP();
+	/* addr param is always NULL. ignore it */
+	/* Get current rateset */
+	err = wldev_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
+		sizeof(rateset), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("could not get current rateset (%d)\n", err));
+		return err;
+	}
+
+	rateset.count = dtoh32(rateset.count);
+
+	legacy = wl_find_msb(mask->control[IEEE80211_BAND_2GHZ].legacy);
+	if (!legacy)
+		legacy = wl_find_msb(mask->control[IEEE80211_BAND_5GHZ].legacy);
+
+	if (!legacy)
+		legacy = 1;
+
+	val = wl_g_rates[legacy - 1].bitrate * 100000;
+
+	if (val < rateset.count) {
+		/* Select rate by rateset index */
+		rate = rateset.rates[val] & 0x7f;
+	} else {
+		/* Specified rate in bps */
+		rate = val / 500000;
+	}
+
+	WL_DBG(("rate %d mbps\n", (rate / 2)));
+
+	/*
+	 * Set rate override.
+	 * Since this is a/b/g-blind, both a_rate and bg_rate are enforced.
+	 */
+	err_bg = wl_dev_intvar_set(dev, "bg_rate", rate);
+	err_a = wl_dev_intvar_set(dev, "a_rate", rate);
+	if (unlikely(err_bg && err_a)) {
+		WL_ERR(("could not set fixed rate (%d) (%d)\n", err_bg, err_a));
+		return err_bg | err_a;
+	}
+
+	return err;
+}
+
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	s32 err = 0;
+
+	if (unlikely(!test_bit(WL_STATUS_READY, &wl->status))) {
+		WL_INFO(("device is not ready : status (%d)\n",
+			(int)wl->status));
+		return 0;
+	}
+
+	wl_invoke_iscan(wl);
+
+	return err;
+}
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	s32 err = 0;
+
+	if (unlikely(!test_bit(WL_STATUS_READY, &wl->status))) {
+		WL_INFO(("device is not ready : status (%d)\n",
+			(int)wl->status));
+		return 0;
+	}
+
+	set_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
+	wl_term_iscan(wl);
+	if (wl->scan_request) {
+		cfg80211_scan_done(wl->scan_request, true);
+		wl->scan_request = NULL;
+	}
+	clear_bit(WL_STATUS_SCANNING, &wl->status);
+	clear_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
+
+	return err;
+}
+
+static __used s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+	s32 err)
+{
+	int i, j;
+
+	WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+		WL_DBG(("PMKID[%d]: %pM =\n", i,
+			&pmk_list->pmkids.pmkid[i].BSSID));
+		for (j = 0; j < WPA2_PMKID_LEN; j++) {
+			WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+		}
+	}
+	if (likely(!err)) {
+		err = wl_dev_bufvar_set(dev, "pmkid_info", (char *)pmk_list,
+			sizeof(*pmk_list));
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	s32 err = 0;
+	int i;
+
+	CHECK_SYS_UP();
+	for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+			ETHER_ADDR_LEN))
+			break;
+	if (i < WL_NUM_PMKIDS_MAX) {
+		memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+			ETHER_ADDR_LEN);
+		memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+			WPA2_PMKID_LEN);
+		if (i == wl->pmk_list->pmkids.npmkid)
+			wl->pmk_list->pmkids.npmkid++;
+	} else {
+		err = -EINVAL;
+	}
+	WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+		&wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n",
+			wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].
+			PMKID[i]));
+	}
+
+	err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	struct _pmkid_list pmkid;
+	s32 err = 0;
+	int i;
+
+	CHECK_SYS_UP();
+	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+	memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+	WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+		&pmkid.pmkid[0].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+	}
+
+	for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp
+		    (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+		     ETHER_ADDR_LEN))
+			break;
+
+	if ((wl->pmk_list->pmkids.npmkid > 0) &&
+		(i < wl->pmk_list->pmkids.npmkid)) {
+		memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+		for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
+			memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
+				&wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
+				ETHER_ADDR_LEN);
+			memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
+				&wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		wl->pmk_list->pmkids.npmkid--;
+	} else {
+		err = -EINVAL;
+	}
+
+	err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+	return err;
+
+}
+
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	s32 err = 0;
+
+	CHECK_SYS_UP();
+	memset(wl->pmk_list, 0, sizeof(*wl->pmk_list));
+	err = wl_update_pmklist(dev, wl->pmk_list, err);
+	return err;
+
+}
+
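+/* Allocate and initialize scan parameters for a single-channel, zero-SSID
+ * scan request.  The returned buffer must be freed by the caller; its size
+ * is returned through out_params_size.
+ */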
+wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
+{
+	wl_scan_params_t *params;
+	int params_size;
+	int num_chans;
+
+	*out_params_size = 0;
+
+	/* Our scan params only need space for 1 channel and 0 ssids */
+	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		WL_ERR(("%s: mem alloc failed (%d bytes)\n", __func__, params_size));
+		return params;
+	}
+	memset(params, 0, params_size);
+	params->nprobes = nprobes;
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	params->channel_list[0] = htodchanspec(channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
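+
+/* Abort any scan in progress by issuing a WLC_SCAN ioctl with an invalid
+ * channel (-1), then queue a synthesized WLC_E_ESCAN_RESULT event with
+ * status WLC_E_STATUS_ABORT so the abort is reported from the event thread.
+ */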
+s32
+wl_cfg80211_scan_abort(struct wl_priv *wl, struct net_device *ndev)
+{
+	wl_scan_params_t *params;
+	s32 params_size;
+	s32 err = BCME_OK;
+
+	WL_DBG(("Enter\n"));
+
+	/* Our scan params only need space for 1 channel and 0 ssids */
+	params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size);
+	if (params == NULL) {
+		WL_ERR(("scan params allocation failed\n"));
+		return -ENOMEM;
+	}
+	/* Do a scan abort to stop the driver's scan engine */
+	err = wldev_ioctl(ndev, WLC_SCAN, params, params_size, FALSE);
+	if (err < 0) {
+		WL_ERR(("scan abort failed\n"));
+	}
+	kfree(params);
+	/* Report the scan abort to CFG driver too 
+	 * We will report it from the thread context, not from this context.
+	 */
+
+	{
+		wl_event_msg_t msg;
+		msg.event_type =  hton32(WLC_E_ESCAN_RESULT);
+		msg.status = WLC_E_STATUS_ABORT;
+		wl_cfg80211_event(ndev, &msg, NULL);
+	}
+	return err;
+}
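+
+/* Handle the cfg80211 remain_on_channel request: abort any scan in
+ * progress, report readiness to cfg80211 and, if P2P discovery is not yet
+ * enabled, enable it before starting a discovery listen on the target
+ * channel for the requested duration.
+ */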
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel * channel,
+	enum nl80211_channel_type channel_type,
+	unsigned int duration, u64 *cookie)
+{
+	s32 target_channel;
+
+	s32 err = BCME_OK;
+	struct wl_priv *wl = WL_PRIV_GET();
+	dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+	WL_DBG(("Enter, netdev_ifidx: %d \n", dev->ifindex));
+	if (likely(test_bit(WL_STATUS_SCANNING, &wl->status))) {
+		wl_cfg80211_scan_abort(wl, dev);
+	}
+
+	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+	memcpy(&wl->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+	wl->remain_on_chan_type = channel_type;
+	wl->cache_cookie = *cookie;
+	cfg80211_ready_on_channel(dev, *cookie, channel,
+		channel_type, duration, GFP_KERNEL);
+	if (!p2p_on(wl)) {
+		wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p.dev_addr, &wl->p2p.int_addr);
+
+		/* In case of the p2p_listen command, the supplicant sends
+		 * remain_on_channel without turning on P2P
+		 */
+
+		err = wl_cfgp2p_enable_discovery(wl, dev, NULL, 0);
+
+		if (unlikely(err)) {
+			goto exit;
+		}
+		p2p_on(wl) = true;
+	}
+	if (p2p_on(wl))
+		wl_cfgp2p_discover_listen(wl, target_channel, duration);
+
+
+exit:
+	return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, struct net_device *dev, u64 cookie)
+{
+
+	s32 err = 0;
+	WL_DBG((" enter ) netdev_ifidx: %d \n", dev->ifindex));
+
+
+	return err;
+}
+
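+/* Transmit a management frame on behalf of cfg80211.  Probe responses only
+ * have their WPS/P2P IEs pushed to the firmware; action frames are wrapped
+ * in a wl_af_params_t and sent through wl_cfgp2p_tx_action_frame() with an
+ * off-channel dwell time of WL_DWELL_TIME.
+ */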
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel *channel, bool offchan,
+	enum nl80211_channel_type channel_type,
+	bool channel_type_valid, unsigned int wait,
+	const u8* buf, size_t len, u64 *cookie)
+{
+	wl_action_frame_t *action_frame;
+	wl_af_params_t *af_params;
+	wifi_p2p_ie_t *p2p_ie;
+	wpa_ie_fixed_t *wps_ie;
+	const struct ieee80211_mgmt *mgmt;
+	struct wl_priv *wl = WL_PRIV_GET();
+	dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+	s32 err = BCME_OK;
+	s32 bssidx = 0;
+	u32 p2pie_len = 0;
+	u32 wpsie_len = 0;
+	u16 fc;
+	bool ack = FALSE;
+	WL_DBG(("Enter \n"));
+	/* find bssidx based on ndev */
+	bssidx = wl_cfgp2p_find_idx(wl, dev);
+	/* cookie generation */
+	*cookie = (unsigned long) buf;
+
+	if (bssidx == -1) {
+
+		WL_ERR(("Can not find the bssidx for dev( %p )\n", dev));
+		return -ENODEV;
+	}
+	if (p2p_on(wl)) {
+		wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p.dev_addr, &wl->p2p.int_addr);
+
+	   /* Suspend P2P discovery search-listen to prevent it from changing the
+		* channel.
+		*/
+		wl_cfgp2p_discover_enable_search(wl, FALSE);
+	    /* Abort the dwell time of any previous off-channel action frame that may
+	     * be still in effect.  Sending off-channel action frames relies on the
+	     * driver's scan engine.  If a previous off-channel action frame tx is
+	     * still in progress (including the dwell time), then this new action
+	     * frame will not be sent out.
+	     */
+		wl_cfg80211_scan_abort(wl, dev);
+	}
+	mgmt = (const struct ieee80211_mgmt *) buf;
+	fc = mgmt->frame_control;
+	if (fc != IEEE80211_STYPE_ACTION) {
+		if (p2p_on(wl) && (fc == IEEE80211_STYPE_PROBE_RESP)) {
+			s32 ie_offset =  DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+			s32 ie_len = len - ie_offset;
+			if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)(buf + ie_offset), ie_len))
+				!= NULL) {
+				/* Total length of P2P Information Element */
+				p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+				/* Have to change the p2p device address in the dev_info
+				 * attribute because the supplicant uses the primary
+				 * eth0 address
+				 */
+			#ifdef ENABLE_DRIVER_CHANGE_IFADDR /* We are now doing this in supplicant */
+				wl_cfg80211_change_ifaddr((u8 *)p2p_ie,
+					&wl->p2p_dev_addr, P2P_SEID_DEV_INFO);
+			#endif
+			}
+			if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)(buf + ie_offset), ie_len))
+				!= NULL) {
+				/* The supplicant orders the vendor IEs as 1) WPS IE,
+				 * 2) P2P IE, so it is OK to start from the WPS IE
+				 * when saving the IEs to the firmware
+				 */
+				wpsie_len = wps_ie->length + sizeof(wps_ie->length) +
+					sizeof(wps_ie->tag);
+				wl_cfgp2p_set_managment_ie(wl, dev, bssidx,
+					VNDR_IE_PRBRSP_FLAG,
+					(u8 *)wps_ie, wpsie_len + p2pie_len);
+
+			}
+		}
+		cfg80211_mgmt_tx_status(dev, *cookie, buf, len, true, GFP_KERNEL);
+
+		goto exit;
+	}
+	af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+	if (af_params == NULL)
+	{
+		WL_ERR(("unable to allocate frame\n"));
+		return -ENOMEM;
+	}
+
+	action_frame = &af_params->action_frame;
+
+	/* Add the packet Id */
+	action_frame->packetId = (u32) action_frame;
+	WL_DBG(("action frame %d\n", action_frame->packetId));
+	/* Add BSSID */
+	memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+	memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+	/* Add the length, excluding the 802.11 header */
+	action_frame->len = len - DOT11_MGMT_HDR_LEN;
+	WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+	/* Add the channel */
+	af_params->channel =
+		ieee80211_frequency_to_channel(channel->center_freq);
+
+	/* Add the dwell time:
+	 * time to stay off-channel waiting for a response action frame
+	 * after transmitting a GO Negotiation action frame
+	 */
+	af_params->dwell_time = WL_DWELL_TIME;
+
+	memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+	if (wl->p2p.vif_created) {
+		wifi_p2p_pub_act_frame_t *act_frm =
+			(wifi_p2p_pub_act_frame_t *) (action_frame->data);
+		/*
+		 * Have to change the intended address in GO REQ, GO RSP and INVITE REQ
+		 * because wpa_supplicant uses the eth0 primary address
+		 */
+		if ((act_frm->subtype == P2P_PAF_GON_REQ)||
+		  (act_frm->subtype == P2P_PAF_GON_RSP)||
+		  (act_frm->subtype == P2P_PAF_GON_CONF)||
+		  (act_frm->subtype == P2P_PAF_INVITE_REQ)) {
+			p2p_ie = wl_cfgp2p_find_p2pie(act_frm->elts,
+				action_frame->len - P2P_PUB_AF_FIXED_LEN);
+		#ifdef ENABLE_DRIVER_CHANGE_IFADDR /* We are now doing this in supplicant */
+			wl_cfg80211_change_ifaddr((u8 *)p2p_ie, &wl->p2p.int_addr,
+				P2P_SEID_INTINTADDR);
+			wl_cfg80211_change_ifaddr((u8 *)p2p_ie, &wl->p2p.dev_addr,
+				P2P_SEID_DEV_INFO);
+			wl_cfg80211_change_ifaddr((u8 *)p2p_ie, &wl->p2p.dev_addr,
+				P2P_SEID_GROUP_ID);
+		#endif
+		}
+	}
+
+	ack = (wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx)) ? FALSE : TRUE;
+	cfg80211_mgmt_tx_status(dev, *cookie, buf, len, ack, GFP_KERNEL);
+
+	kfree(af_params);
+exit:
+	return err;
+}
+
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev,
+	u16 frame_type, bool reg)
+{
+
+	WL_DBG(("%s: frame_type: %x, reg: %d\n", __func__, frame_type, reg));
+
+	if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+		return;
+
+	return;
+}
+
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct bss_parameters *params)
+{
+	if (params->use_cts_prot >= 0) {
+	}
+
+	if (params->use_short_preamble >= 0) {
+	}
+
+	if (params->use_short_slot_time >= 0) {
+	}
+
+	if (params->basic_rates) {
+	}
+
+	if (params->ap_isolate >= 0) {
+	}
+
+	if (params->ht_opmode >= 0) {
+	}
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel *chan,
+	enum nl80211_channel_type channel_type)
+{
+	s32 channel;
+	s32 err = BCME_OK;
+
+	channel = ieee80211_frequency_to_channel(chan->center_freq);
+	WL_DBG(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+		dev->ifindex, channel_type, channel));
+	err = wldev_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel), FALSE);
+
+	return err;
+}
+
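+/* Parse an RSN (WPA2) IE and program the matching "auth", "wsec" and
+ * "wpa_auth" iovars on the given BSS config.
+ */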
+static s32
+wl_check_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+	s32 len;
+	s32 err = BCME_OK;
+	u16 auth = 0; /* d11 open authentication */
+	u16 wpa_auth = 0;
+	u16 count;
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u8* tmp;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	if (wpa2ie == NULL)
+		goto exit;
+	len = wpa2ie->len;
+	/* check the mcast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	tmp = mcast->oui;
+	switch (tmp[DOT11_OUI_LEN]) {
+		case WPA_CIPHER_NONE:
+			gval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			gval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			gval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("No Security Info\n"));
+			break;
+	}
+	len -= WPA_SUITE_LEN;
+	/* check the unicast cipher */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	count = ltoh16_ua(&ucast->count);
+	tmp = ucast->list[0].oui;
+	switch (tmp[DOT11_OUI_LEN]) {
+		case WPA_CIPHER_NONE:
+			pval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			pval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			pval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("No Security Info\n"));
+			break;
+	}
+	/* For WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* check the AKM */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[1];
+	count = ltoh16_ua(&mgmt->count);
+	tmp = (u8 *)&mgmt->list[0];
+	switch (tmp[DOT11_OUI_LEN]) {
+		case RSN_AKM_NONE:
+			wpa_auth = WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			wpa_auth = WPA2_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			wpa_auth = WPA2_AUTH_PSK;
+			break;
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+			break;
+	}
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+	}
+exit:
+	return 0;
+}
+
+static s32
+wl_check_wpaie(struct net_device *dev, bcm_tlv_t *wpaie, s32 bssidx)
+{
+	if (wpaie == NULL)
+		goto exit;
+
+exit:
+	return 0;
+}
+
+static s32
+wl_cfg80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+	struct beacon_parameters *info)
+{
+	s32 err = BCME_OK;
+	bcm_tlv_t *ssid_ie;
+	wpa_ie_fixed_t *wps_ie;
+	wpa_ie_fixed_t *wpa_ie;
+	bcm_tlv_t *wpa2_ie;
+	wifi_p2p_ie_t *p2p_ie;
+	struct wl_priv *wl = WL_PRIV_GET();
+	bool is_bssup = false;
+	u16 wpsie_len = 0;
+	u16 p2pie_len = 0;
+	u8 beacon_ie[IE_MAX_LEN];
+	s32 ie_offset = 0;
+	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+	s32 infra = 1;
+
+	memset(beacon_ie, 0, sizeof(beacon_ie));
+
+	WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+		info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+
+	if (p2p_on(wl) && (bssidx >= wl_to_p2p_bss_bssidx(wl,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		/* We don't need to set the beacon for a P2P GO,
+		 * but we do need to parse the SSID from beacon_parameters
+		 * because there is no other way to set the SSID
+		 */
+		ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+		/* find the SSID */
+		if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+			info->head_len - ie_offset,
+			DOT11_MNG_SSID_ID)) != NULL) {
+			memcpy(wl->p2p.ssid.SSID, ssid_ie->data, ssid_ie->len);
+			wl->p2p.ssid.SSID_len = ssid_ie->len;
+			WL_DBG(("SSID (%s) in Head \n", ssid_ie->data));
+
+		} else {
+			WL_ERR(("No SSID in beacon \n"));
+		}
+
+		/* find the WPSIE */
+		if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) {
+			wpsie_len = wps_ie->length + sizeof(wps_ie->tag) + sizeof(wps_ie->length);
+			/*
+			 * Should be compared with saved ie before saving it
+			 */
+			if (wl_dbg_level & WL_DBG_INFO)
+				wl_dbg_dump_wps_ie((char *) wps_ie);
+			memcpy(beacon_ie, wps_ie, wpsie_len);
+		} else {
+			WL_ERR(("No WPSIE in beacon \n"));
+		}
+
+
+		/* find the P2PIE */
+		if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)info->tail, info->tail_len)) != NULL) {
+			/* Total length of P2P Information Element */
+			p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+		#ifdef ENABLE_DRIVER_CHANGE_IFADDR /* We are now doing this in supplicant */
+			/* Have to change the device address in the dev_id attribute
+			 * because the supplicant uses the primary eth0 address
+			 */
+			wl_cfg80211_change_ifaddr((u8 *)p2p_ie, &wl->p2p_dev_addr, P2P_SEID_DEV_ID);
+		#endif
+			memcpy(&beacon_ie[wpsie_len], p2p_ie, p2pie_len);
+
+		} else {
+			WL_ERR(("No P2PIE in beacon \n"));
+		}
+		wl_cfgp2p_set_managment_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+			beacon_ie, wpsie_len + p2pie_len);
+		wl_cfgp2p_set_managment_ie(wl, dev, bssidx, VNDR_IE_ASSOCRSP_FLAG,
+			beacon_ie, wpsie_len + p2pie_len);
+		/* find the WPA_IE */
+		if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail, info->tail_len)) != NULL) {
+			WL_DBG((" WPA IE is found\n"));
+		}
+		/* find the RSN_IE */
+		if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+			DOT11_MNG_RSN_ID)) != NULL) {
+			WL_DBG((" WPA2 IE is found\n"));
+		}
+		is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
+
+		if (!is_bssup && (wpa_ie != NULL || wpa2_ie != NULL)) {
+			wl_check_wpa2ie(dev, wpa2_ie, bssidx);
+			wl_check_wpaie(dev, (bcm_tlv_t *)wpa_ie, bssidx);
+
+			err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), FALSE);
+			if (err < 0) {
+				WL_ERR(("SET INFRA error %d\n", err));
+			}
+
+			err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &wl->p2p.ssid,
+				sizeof(wl->p2p.ssid), ioctlbuf, sizeof(ioctlbuf), bssidx);
+
+			wl_cfgp2p_bss(dev, bssidx, 1);
+		}
+	}
+	return 0;
+}
+
+#if defined(ANDROID_WIRELESS_PATCH)
+static s32
+wl_cfg80211_drv_start(struct wiphy *wiphy, struct net_device *dev)
+{
+	/* struct wl_priv *wl = wiphy_to_wl(wiphy); */
+	s32 err = 0;
+
+	printk("Android driver start command\n");
+	return err;
+}
+
+static s32
+wl_cfg80211_drv_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+	/* struct wl_priv *wl = wiphy_to_wl(wiphy); */
+	s32 err = 0;
+
+	printk("Android driver stop command\n");
+	return err;
+}
+#endif /* defined(ANDROID_WIRELESS_PATCH) */
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
+	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
+	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
+	.scan = wl_cfg80211_scan,
+	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
+	.join_ibss = wl_cfg80211_join_ibss,
+	.leave_ibss = wl_cfg80211_leave_ibss,
+	.get_station = wl_cfg80211_get_station,
+	.set_tx_power = wl_cfg80211_set_tx_power,
+	.get_tx_power = wl_cfg80211_get_tx_power,
+	.add_key = wl_cfg80211_add_key,
+	.del_key = wl_cfg80211_del_key,
+	.get_key = wl_cfg80211_get_key,
+	.set_default_key = wl_cfg80211_config_default_key,
+	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
+	.set_bitrate_mask = wl_cfg80211_set_bitrate_mask,
+	.connect = wl_cfg80211_connect,
+	.disconnect = wl_cfg80211_disconnect,
+	.suspend = wl_cfg80211_suspend,
+	.resume = wl_cfg80211_resume,
+	.set_pmksa = wl_cfg80211_set_pmksa,
+	.del_pmksa = wl_cfg80211_del_pmksa,
+	.flush_pmksa = wl_cfg80211_flush_pmksa,
+	.remain_on_channel = wl_cfg80211_remain_on_channel,
+	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+	.mgmt_tx = wl_cfg80211_mgmt_tx,
+	.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+	.change_bss = wl_cfg80211_change_bss,
+	.set_channel = wl_cfg80211_set_channel,
+	.set_beacon = wl_cfg80211_set_beacon,
+#if defined(ANDROID_WIRELESS_PATCH)
+	.drv_start = wl_cfg80211_drv_start,
+	.drv_stop = wl_cfg80211_drv_stop
+#endif
+};
+
+static s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+	s32 err = 0;
+
+	switch (mode) {
+	case WL_MODE_BSS:
+		return NL80211_IFTYPE_STATION;
+	case WL_MODE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	case WL_MODE_AP:
+		return NL80211_IFTYPE_AP;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+
+	return err;
+}
+
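+/* Allocate a wireless_dev and its wiphy, advertise the supported interface
+ * modes, bands and cipher suites, and register the wiphy with cfg80211.
+ */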
+static struct wireless_dev *wl_alloc_wdev(s32 sizeof_iface,
+	struct device *dev)
+{
+	struct wireless_dev *wdev;
+	s32 err = 0;
+	struct wl_priv *wl;
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return ERR_PTR(-ENOMEM);
+	}
+	wdev->wiphy =
+	    wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv) + sizeof_iface);
+	if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Could not allocate wiphy device\n"));
+		err = -ENOMEM;
+		goto wiphy_new_out;
+	}
+	set_wiphy_dev(wdev->wiphy, dev);
+	wl = wiphy_to_wl(wdev->wiphy);
+	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+	wdev->wiphy->interface_modes =
+	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+	wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wdev->wiphy->cipher_suites = __wl_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wdev->wiphy->max_remain_on_channel_duration = 5000;
+	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif				/* !WL_POWERSAVE_DISABLED */
+	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+		WIPHY_FLAG_4ADDR_AP |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
+		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+		WIPHY_FLAG_4ADDR_STATION;
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Could not register wiphy device (%d)\n", err));
+		goto wiphy_register_out;
+	}
+	return wdev;
+
+wiphy_register_out:
+	wiphy_free(wdev->wiphy);
+
+wiphy_new_out:
+	kfree(wdev);
+
+	return ERR_PTR(err);
+}
+
+static void wl_free_wdev(struct wl_priv *wl)
+{
+	int i;
+	struct wireless_dev *wdev = wl_to_wdev(wl);
+
+	if (unlikely(!wdev)) {
+		WL_ERR(("wdev is invalid\n"));
+		return;
+	}
+
+	for (i = 0; i < VWDEV_CNT; i++) {
+		if ((wl->vwdev[i] != NULL)) {
+			kfree(wl->vwdev[i]);
+			wl->vwdev[i] = NULL;
+		}
+	}
+	wiphy_unregister(wdev->wiphy);
+	wiphy_free(wdev->wiphy);
+	kfree(wdev);
+}
+
+static s32 wl_inform_bss(struct wl_priv *wl)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	s32 i;
+
+	bss_list = wl->bss_list;
+	/*
+	if (unlikely(bss_list->version != WL_BSS_INFO_VERSION)) {
+		WL_ERR(("Version %d != %d\n",
+			bss_list->version, WL_BSS_INFO_VERSION));
+		return -EOPNOTSUPP;
+	}
+	*/
+	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i) {
+		err = wl_inform_single_bss(wl, bi);
+		if (unlikely(err))
+			break;
+	}
+	return err;
+}
+
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
+{
+	struct wiphy *wiphy = wiphy_from_scan(wl);
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct wl_cfg80211_bss_info *notif_bss_info;
+	struct wl_scan_req *sr = wl_to_sr(wl);
+	struct beacon_proberesp *beacon_proberesp;
+	s32 mgmt_type;
+	u32 signal;
+	u32 freq;
+	s32 err = 0;
+
+	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		return err;
+	}
+	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+		- sizeof(u8) + WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (unlikely(!notif_bss_info)) {
+		WL_ERR(("notif_bss_info alloc failed\n"));
+		return -ENOMEM;
+	}
+	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+	notif_bss_info->channel =
+		bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec);
+
+	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	notif_bss_info->rssi = bi->RSSI;
+	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	mgmt_type = wl->active_scan ?
+		IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+	    mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+	}
+	beacon_proberesp = wl->active_scan ?
+		(struct beacon_proberesp *)&mgmt->u.probe_resp :
+		(struct beacon_proberesp *)&mgmt->u.beacon;
+	beacon_proberesp->timestamp = 0;
+	beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+	beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+	wl_rst_ie(wl);
+	/*
+	* wl_add_ie is not necessary because it can only add duplicated
+	* SSID, rate information to frame_buf
+	*/
+	/*
+	* wl_add_ie(wl, WLAN_EID_SSID, bi->SSID_len, bi->SSID);
+	* wl_add_ie(wl, WLAN_EID_SUPP_RATES, bi->rateset.count,
+	* bi->rateset.rates);
+	*/
+	wl_mrg_ie(wl, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+	wl_cp_ie(wl, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+		offsetof(struct wl_cfg80211_bss_info, frame_buf));
+	notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+		u.beacon.variable) + wl_get_ielen(wl);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+#else
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+	channel = ieee80211_get_channel(wiphy, freq);
+
+	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x%04x, bssid %pM\n",
+		bi->SSID,
+		notif_bss_info->rssi, notif_bss_info->channel,
+		mgmt->u.beacon.capab_info, &bi->BSSID));
+
+	signal = notif_bss_info->rssi * 100;
+	if (unlikely(!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+		le16_to_cpu
+		(notif_bss_info->frame_len),
+		signal, GFP_KERNEL))) {
+		WL_ERR(("cfg80211_inform_bss_frame error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	kfree(notif_bss_info);
+
+	return err;
+}
+
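+/* Event classification helpers: map firmware events to link-up, link-down
+ * and no-network conditions for wl_notify_connect_status().
+ */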
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev)
+{
+	u32 event = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+
+	WL_DBG(("event %d, status %d\n", event, status));
+	if (event == WLC_E_SET_SSID) {
+		if (status == WLC_E_STATUS_SUCCESS) {
+			if (!wl_is_ibssmode(wl, ndev))
+				return true;
+		}
+	}
+
+	WL_DBG(("wl_is_linkup false\n"));
+	return false;
+}
+
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+
+	if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+		return true;
+	} else if (event == WLC_E_LINK) {
+		if (!(flags & WLC_EVENT_MSG_LINK))
+			return true;
+	}
+
+	return false;
+}
+
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+
+	if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
+		return true;
+	if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
+		return true;
+
+	return false;
+}
+
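+/* Connect-status event handler.  In AP mode the (re)assoc/deauth/disassoc
+ * event body is rebuilt into a management frame and passed to
+ * cfg80211_rx_mgmt(); in STA/IBSS mode link-up and link-down transitions
+ * are reported to cfg80211 (connect result, IBSS joined, or disconnected).
+ */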
+static s32
+wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	bool isfree = FALSE;
+	s32 err = 0;
+	s32 freq;
+	s32 channel;
+	u8 body[200];
+	u32 event = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	u32 len = ntoh32(e->datalen);
+	u16 fc = 0;
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	struct ieee80211_supported_band *band;
+	struct ether_addr da;
+	struct ether_addr bssid;
+	struct wiphy *wiphy = wl_to_wiphy(wl);
+	channel_info_t ci;
+
+	memset(body, 0, sizeof(body));
+	WL_DBG(("Enter \n"));
+
+	if (get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+		/* clamp the event body to the size of the local buffer */
+		if (len > sizeof(body))
+			len = sizeof(body);
+		memcpy(body, data, len);
+		wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+		&da, sizeof(struct ether_addr), ioctlbuf, sizeof(ioctlbuf), bsscfgidx);
+		wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE);
+		switch (event) {
+			case WLC_E_ASSOC_IND:
+				fc = FC_ASSOC_REQ;
+				break;
+			case WLC_E_REASSOC_IND:
+				fc = FC_REASSOC_REQ;
+				break;
+			case WLC_E_DISASSOC_IND:
+				fc = FC_DISASSOC;
+				break;
+			case WLC_E_DEAUTH_IND:
+				fc = FC_DEAUTH;
+				break;
+			case WLC_E_DEAUTH:
+				fc = FC_DEAUTH;
+				break;
+			default:
+				fc = 0;
+				goto exit;
+		}
+		if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), FALSE)))
+			return err;
+
+		channel = dtoh32(ci.hw_channel);
+		if (channel <= CH_MAX_2G_CHANNEL)
+			band = wiphy->bands[IEEE80211_BAND_2GHZ];
+		else
+			band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+		freq = ieee80211_channel_to_frequency(channel);
+#else
+		freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+		err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+		&mgmt_frame, &len, body);
+		if (err < 0)
+			goto exit;
+		isfree = TRUE;
+
+		if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
+			cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+		} else if (event == WLC_E_DISASSOC_IND) {
+			cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+		} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+			cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+		}
+
+	} else {
+		WL_DBG(("wl_notify_connect_status : event %d status : %d \n",
+			ntoh32(e->event_type), ntoh32(e->status)));
+		if (wl_is_linkup(wl, e, ndev)) {
+			wl_link_up(wl);
+			if (wl_is_ibssmode(wl, ndev)) {
+				printk("cfg80211_ibss_joined");
+				cfg80211_ibss_joined(ndev, (s8 *)&e->addr,
+					GFP_KERNEL);
+				WL_DBG(("joined in IBSS network\n"));
+			} else {
+				printk("wl_bss_connect_done succeeded");
+				wl_bss_connect_done(wl, ndev, e, data, true);
+				WL_DBG(("joined in BSS network \"%s\"\n",
+					((struct wlc_ssid *)
+					 wl_read_prof(wl, WL_PROF_SSID))->SSID));
+			}
+			act = true;
+			wl_update_prof(wl, e, &act, WL_PROF_ACT);
+		} else if (wl_is_linkdown(wl, e)) {
+			if (test_bit(WL_STATUS_CONNECTED, &wl->status)) {
+				printk("link down, call cfg80211_disconnected ");
+				cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+				clear_bit(WL_STATUS_CONNECTED, &wl->status);
+				wl_link_down(wl);
+				wl_init_prof(wl->profile);
+			}
+		} else if (wl_is_nonetwork(wl, e)) {
+			printk("connect failed e->status 0x%x", (int)ntoh32(e->status));
+			if (test_bit(WL_STATUS_CONNECTING, &wl->status))
+				wl_bss_connect_done(wl, ndev, e, data, false);
+		} else {
+			printk("%s nothing\n", __FUNCTION__);
+		}
+		printk("\n");
+	}
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	return err;
+}
+
+static s32
+wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	wl_bss_roaming_done(wl, ndev, e, data);
+	act = true;
+	wl_update_prof(wl, e, &act, WL_PROF_ACT);
+
+	return err;
+}
+
+static __used s32
+wl_dev_bufvar_set(struct net_device *dev, s8 *name, s8 *buf, s32 len)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	u32 buflen;
+
+	buflen = bcm_mkiovar(name, buf, len, wl->ioctl_buf, WL_IOCTL_LEN_MAX);
+	BUG_ON(unlikely(!buflen));
+
+	return wldev_ioctl(dev, WLC_SET_VAR, wl->ioctl_buf, buflen, FALSE);
+}
+
+static s32
+wl_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
+	s32 buf_len)
+{
+	struct wl_priv *wl = WL_PRIV_GET();
+	u32 len;
+	s32 err = 0;
+
+	len = bcm_mkiovar(name, NULL, 0, wl->ioctl_buf, WL_IOCTL_LEN_MAX);
+	BUG_ON(unlikely(!len));
+	err = wldev_ioctl(dev, WLC_GET_VAR, (void *)wl->ioctl_buf,
+		WL_IOCTL_LEN_MAX, FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	memcpy(buf, wl->ioctl_buf, buf_len);
+
+	return err;
+}
+
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev)
+{
+	wl_assoc_info_t assoc_info;
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	err = wl_dev_bufvar_get(ndev, "assoc_info", wl->extra_buf,
+		WL_ASSOC_INFO_MAX);
+	if (unlikely(err)) {
+		WL_ERR(("could not get assoc info (%d)\n", err));
+		return err;
+	}
+	memcpy(&assoc_info, wl->extra_buf, sizeof(wl_assoc_info_t));
+	assoc_info.req_len = htod32(assoc_info.req_len);
+	assoc_info.resp_len = htod32(assoc_info.resp_len);
+	assoc_info.flags = htod32(assoc_info.flags);
+	if (assoc_info.req_len) {
+		err = wl_dev_bufvar_get(ndev, "assoc_req_ies", wl->extra_buf,
+			WL_ASSOC_INFO_MAX);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc req (%d)\n", err));
+			return err;
+		}
+		conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+		if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+			conn_info->req_ie_len -= ETHER_ADDR_LEN;
+		}
+		conn_info->req_ie =
+		    kmemdup(wl->extra_buf, conn_info->req_ie_len, GFP_KERNEL);
+	} else {
+		conn_info->req_ie_len = 0;
+		conn_info->req_ie = NULL;
+	}
+	if (assoc_info.resp_len) {
+		err = wl_dev_bufvar_get(ndev, "assoc_resp_ies", wl->extra_buf,
+			WL_ASSOC_INFO_MAX);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc resp (%d)\n", err));
+			return err;
+		}
+		conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+		conn_info->resp_ie =
+		    kmemdup(wl->extra_buf, conn_info->resp_ie_len, GFP_KERNEL);
+	} else {
+		conn_info->resp_ie_len = 0;
+		conn_info->resp_ie = NULL;
+	}
+	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+		conn_info->resp_ie_len));
+
+	return err;
+}
+
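+/* Convert a channel number into a 20 MHz chanspec in the join parameters
+ * and grow *join_params_size accordingly.  A channel of 0 leaves the join
+ * parameters untouched.
+ */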
+static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+        size_t *join_params_size)
+{
+	chanspec_t chanspec = 0;
+
+	if (ch != 0) {
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		join_params->params.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+			htodchanspec(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num =
+			htod32(join_params->params.chanspec_num);
+
+		WL_DBG(("%s  join_params->params.chanspec_list[0]= %X\n",
+			__FUNCTION__, join_params->params.chanspec_list[0]));
+
+	}
+}
+
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev)
+{
+	struct cfg80211_bss *bss;
+	struct wl_bss_info *bi;
+	struct wlc_ssid *ssid;
+	struct bcm_tlv *tim;
+	u16 beacon_interval;
+	u8 dtim_period;
+	size_t ie_len;
+	u8 *ie;
+	s32 err = 0;
+	struct wiphy *wiphy;
+	wiphy = wl_to_wiphy(wl);
+
+	if (wl_is_ibssmode(wl, ndev))
+		return err;
+
+	ssid = (struct wlc_ssid *)wl_read_prof(wl, WL_PROF_SSID);
+	bss = cfg80211_get_bss(wiphy, NULL, (s8 *)&wl->bssid,
+		ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+		WLAN_CAPABILITY_ESS);
+
+	rtnl_lock();
+	if (unlikely(!bss)) {
+		WL_DBG(("Could not find the AP\n"));
+		*(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+		err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_BSS_INFO,
+			wl->extra_buf, WL_EXTRA_BUF_MAX, FALSE);
+		if (unlikely(err)) {
+			WL_ERR(("Could not get bss info %d\n", err));
+			goto update_bss_info_out;
+		}
+		bi = (struct wl_bss_info *)(wl->extra_buf + 4);
+		if (unlikely(memcmp(&bi->BSSID, &wl->bssid, ETHER_ADDR_LEN))) {
+			err = -EIO;
+			goto update_bss_info_out;
+		}
+		err = wl_inform_single_bss(wl, bi);
+		if (unlikely(err))
+			goto update_bss_info_out;
+
+		ie = ((u8 *)bi) + bi->ie_offset;
+		ie_len = bi->ie_length;
+		beacon_interval = cpu_to_le16(bi->beacon_period);
+	} else {
+		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+		beacon_interval = bss->beacon_interval;
+		cfg80211_put_bss(bss);
+	}
+
+	tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+	if (tim) {
+		dtim_period = tim->data[1];
+	} else {
+		/*
+		 * An active scan was done, so we could not get the DTIM
+		 * information from the probe response.
+		 * Query the DTIM information from the dongle instead.
+		 */
+		err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_DTIMPRD,
+			&dtim_period, sizeof(dtim_period), FALSE);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+			goto update_bss_info_out;
+		}
+	}
+
+	wl_update_prof(wl, NULL, &beacon_interval, WL_PROF_BEACONINT);
+	wl_update_prof(wl, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+	rtnl_unlock();
+	return err;
+}
+
+static s32
+wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+	s32 err = 0;
+
+	wl_get_assoc_ies(wl, ndev);
+	memcpy(&wl->bssid, &e->addr, ETHER_ADDR_LEN);
+	wl_update_bss_info(wl, ndev);
+	cfg80211_roamed(ndev,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+		NULL,
+#endif
+		(u8 *)&wl->bssid,
+		conn_info->req_ie, conn_info->req_ie_len,
+		conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+	WL_DBG(("Report roaming result\n"));
+
+	set_bit(WL_STATUS_CONNECTED, &wl->status);
+
+	return err;
+}
+
+static s32
+wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+	s32 err = 0;
+
+	WL_DBG((" enter\n"));
+	wl_get_assoc_ies(wl, ndev);
+	memcpy(&wl->bssid, &e->addr, ETHER_ADDR_LEN);
+	wl_update_bss_info(wl, ndev);
+	if (test_and_clear_bit(WL_STATUS_CONNECTING, &wl->status)) {
+		cfg80211_connect_result(ndev,
+			(u8 *)&wl->bssid,
+			conn_info->req_ie,
+			conn_info->req_ie_len,
+			conn_info->resp_ie,
+			conn_info->resp_ie_len,
+			completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT,
+			GFP_KERNEL);
+		WL_DBG(("Report connect result - connection %s\n",
+			completed ? "succeeded" : "failed"));
+	} else {
+		cfg80211_roamed(ndev,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+			NULL,
+#endif
+			(u8 *)&wl->bssid,
+			conn_info->req_ie, conn_info->req_ie_len,
+			conn_info->resp_ie, conn_info->resp_ie_len,
+			GFP_KERNEL);
+		WL_DBG(("Report roaming result\n"));
+	}
+	set_bit(WL_STATUS_CONNECTED, &wl->status);
+
+	return err;
+}
+
+static s32
+wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	u16 flags = ntoh16(e->flags);
+	enum nl80211_key_type key_type;
+
+	rtnl_lock();
+	if (flags & WLC_EVENT_MSG_GROUP)
+		key_type = NL80211_KEYTYPE_GROUP;
+	else
+		key_type = NL80211_KEYTYPE_PAIRWISE;
+
+	cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+		NULL, GFP_KERNEL);
+	rtnl_unlock();
+
+	return 0;
+}
+
+static s32
+wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct channel_info channel_inform;
+	struct wl_scan_results *bss_list;
+	u32 len = WL_SCAN_BUF_MAX;
+	s32 err = 0;
+
+
+	WL_DBG(("Enter \n"));
+	if (wl->iscan_on && wl->iscan_kickstart)
+		return wl_wakeup_iscan(wl_to_iscan(wl));
+
+	if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
+		WL_DBG(("Scan complete while device not scanning\n"));
+		return -EINVAL;
+	}
+	if (unlikely(!wl->scan_request)) {
+	}
+	rtnl_lock();
+	err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+		sizeof(channel_inform), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("scan busy (%d)\n", err));
+		goto scan_done_out;
+	}
+	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+	wl->bss_list = wl->scan_results;
+	bss_list = wl->bss_list;
+	memset(bss_list, 0, len);
+	bss_list->buflen = htod32(len);
+	err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		err = -EINVAL;
+		goto scan_done_out;
+	}
+	bss_list->buflen = dtoh32(bss_list->buflen);
+	bss_list->version = dtoh32(bss_list->version);
+	bss_list->count = dtoh32(bss_list->count);
+
+	err = wl_inform_bss(wl);
+	if (err)
+		goto scan_done_out;
+
+scan_done_out:
+	if (wl->scan_request) {
+		WL_ERR(("cfg80211_scan_done\n"));
+		cfg80211_scan_done(wl->scan_request, false);
+		wl->scan_request = NULL;
+	}
+	rtnl_unlock();
+	return err;
+}
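+
+/* Build an 802.11 management frame around an event body: allocate a buffer,
+ * fill in a dot11_management_header with the given frame control, DA, SA
+ * and BSSID, and copy the body in after any (re)assoc fixed fields.  The
+ * caller must kfree(*pheader).
+ */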
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody)
+{
+	struct dot11_management_header *hdr;
+	u32 totlen = 0;
+	s32 err = 0;
+	u8 *offset;
+	u32 prebody_len = *body_len;
+	switch (fc) {
+		case FC_ASSOC_REQ:
+			/* capability, listen interval */
+			totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+			break;
+
+		case FC_REASSOC_REQ:
+			/* capability, listen interval, AP address */
+			totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+			break;
+	}
+	totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+	*pheader = kzalloc(totlen, GFP_KERNEL);
+	if (*pheader == NULL) {
+		WL_ERR(("memory alloc failed \n"));
+		return -ENOMEM;
+	}
+	hdr = (struct dot11_management_header *) (*pheader);
+	hdr->fc = htol16(fc);
+	hdr->durid = 0;
+	hdr->seq = 0;
+	offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+	bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+	bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+	bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+	bcopy((const char*)pbody, offset, prebody_len);
+	*body_len = totlen;
+	return err;
+}
+static s32
+wl_notify_rx_mgmt_frame(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct ieee80211_supported_band *band;
+	struct wiphy *wiphy = wl_to_wiphy(wl);
+	struct ether_addr da;
+	struct ether_addr bssid;
+	bool isfree = FALSE;
+	s32 err = 0;
+	s32 freq;
+	wl_event_rx_frame_data_t *rxframe =
+		(wl_event_rx_frame_data_t*)data;
+	u32 event = ntoh32(e->event_type);
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+	u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK) & 0x0f);
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(channel);
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+	if (event == WLC_E_ACTION_FRAME_RX) {
+		wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+		&da, sizeof(struct ether_addr), ioctlbuf, sizeof(ioctlbuf), bsscfgidx);
+		wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE);
+		err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+			&mgmt_frame, &mgmt_frame_len,
+			(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+		if (err < 0) {
+			WL_ERR(("%s: Error in receiving action frame len %d channel %d freq %d\n",
+				__func__, mgmt_frame_len, channel, freq));
+			goto exit;
+		}
+		isfree = TRUE;
+	} else {
+		mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+	}
+
+	cfg80211_rx_mgmt(ndev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+	WL_DBG(("%s: mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", __func__,
+		mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+
+	if (isfree)
+		kfree(mgmt_frame);
+exit:
+	return 0;
+}
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+	s32 i = 0;
+	WL_DBG(("Enter \n"));
+	for (i = 0; i <= VWDEV_CNT; i++) {
+		conf->mode[i].type = -1;
+		conf->mode[i].ndev = NULL;
+	}
+	conf->frag_threshold = (u32)-1;
+	conf->rts_threshold = (u32)-1;
+	conf->retry_short = (u32)-1;
+	conf->retry_long = (u32)-1;
+	conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct wl_profile *prof)
+{
+	memset(prof, 0, sizeof(*prof));
+}
+
+static void wl_init_event_handler(struct wl_priv *wl)
+{
+	memset(wl->evt_handler, 0, sizeof(wl->evt_handler));
+
+	wl->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+	/* wl->evt_handler[WLC_E_JOIN] = wl_notify_connect_status; */
+	wl->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+	wl->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+	wl->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+	wl->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	wl->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+	wl->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	wl->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+
+}
+
+static s32 wl_init_priv_mem(struct wl_priv *wl)
+{
+	WL_DBG(("Enter \n"));
+	wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!wl->scan_results)) {
+		WL_ERR(("Scan results alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL);
+	if (unlikely(!wl->conf)) {
+		WL_ERR(("wl_conf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->profile = (void *)kzalloc(sizeof(*wl->profile), GFP_KERNEL);
+	if (unlikely(!wl->profile)) {
+		WL_ERR(("wl_profile alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->bss_info = (void *)kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (unlikely(!wl->bss_info)) {
+		WL_ERR(("Bss information alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->scan_req_int =
+	    (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
+	if (unlikely(!wl->scan_req_int)) {
+		WL_ERR(("Scan req alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->ioctl_buf = (void *)kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
+	if (unlikely(!wl->ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->escan_ioctl_buf = (void *)kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
+	if (unlikely(!wl->escan_ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!wl->extra_buf)) {
+		WL_ERR(("Extra buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
+	if (unlikely(!wl->iscan)) {
+		WL_ERR(("Iscan buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->fw = (void *)kzalloc(sizeof(*wl->fw), GFP_KERNEL);
+	if (unlikely(!wl->fw)) {
+		WL_ERR(("fw object alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
+	if (unlikely(!wl->pmk_list)) {
+		WL_ERR(("pmk list alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+
+	return 0;
+
+init_priv_mem_out:
+	wl_deinit_priv_mem(wl);
+
+	return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct wl_priv *wl)
+{
+	kfree(wl->scan_results);
+	wl->scan_results = NULL;
+	kfree(wl->bss_info);
+	wl->bss_info = NULL;
+	kfree(wl->conf);
+	wl->conf = NULL;
+	kfree(wl->profile);
+	wl->profile = NULL;
+	kfree(wl->scan_req_int);
+	wl->scan_req_int = NULL;
+	kfree(wl->ioctl_buf);
+	wl->ioctl_buf = NULL;
+	kfree(wl->escan_ioctl_buf);
+	wl->escan_ioctl_buf = NULL;
+	kfree(wl->extra_buf);
+	wl->extra_buf = NULL;
+	kfree(wl->iscan);
+	wl->iscan = NULL;
+	kfree(wl->fw);
+	wl->fw = NULL;
+	kfree(wl->pmk_list);
+	wl->pmk_list = NULL;
+}
+
+static s32 wl_create_event_handler(struct wl_priv *wl)
+{
+	WL_DBG(("Enter \n"));
+	sema_init(&wl->event_sync, 0);
+	wl->event_tsk = kthread_run(wl_event_handler, wl, "wl_event_handler");
+	if (IS_ERR(wl->event_tsk)) {
+		wl->event_tsk = NULL;
+		WL_ERR(("failed to create event thread\n"));
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void wl_destroy_event_handler(struct wl_priv *wl)
+{
+	if (wl->event_tsk) {
+		send_sig(SIGTERM, wl->event_tsk, 1);
+		kthread_stop(wl->event_tsk);
+		wl->event_tsk = NULL;
+	}
+}
+
+static void wl_term_iscan(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+	WL_TRACE(("In\n"));
+	if (wl->iscan_on && iscan->tsk) {
+		iscan->state = WL_ISCAN_STATE_IDLE;
+		WL_INFO(("SIGTERM\n"));
+		send_sig(SIGTERM, iscan->tsk, 1);
+		printk("kthread_stop\n");
+		kthread_stop(iscan->tsk);
+		iscan->tsk = NULL;
+	}
+}
+
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
+{
+	struct wl_priv *wl = iscan_to_wl(iscan);
+
+	WL_DBG(("Enter \n"));
+	if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
+		WL_ERR(("Scan complete while device not scanning\n"));
+		return;
+	}
+	if (likely(wl->scan_request)) {
+		cfg80211_scan_done(wl->scan_request, aborted);
+		wl->scan_request = NULL;
+	}
+	wl->iscan_kickstart = false;
+}
+
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
+{
+	if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
+		WL_DBG(("wake up iscan\n"));
+		up(&iscan->sync);
+		return 0;
+	}
+
+	return -EIO;
+}
+
+static s32
+wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+	struct wl_scan_results **bss_list)
+{
+	struct wl_iscan_results list;
+	struct wl_scan_results *results;
+	struct wl_iscan_results *list_buf;
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
+	list_buf = (struct wl_iscan_results *)iscan->scan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WL_ISCAN_BUF_MAX);
+	err = wldev_iovar_getbuf(iscan->dev, "iscanresults", &list,
+		WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
+		WL_ISCAN_BUF_MAX);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	results->count = dtoh32(results->count);
+	WL_DBG(("results->count = %d\n", results->count));
+	WL_DBG(("results->buflen = %d\n", results->buflen));
+	*status = dtoh32(list_buf->status);
+	*bss_list = results;
+
+	return err;
+}
+
+static s32 wl_iscan_done(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl->iscan;
+	s32 err = 0;
+
+	iscan->state = WL_ISCAN_STATE_IDLE;
+	rtnl_lock();
+	wl_inform_bss(wl);
+	wl_notify_iscan_complete(iscan, false);
+	rtnl_unlock();
+
+	return err;
+}
+
+static s32 wl_iscan_pending(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl->iscan;
+	s32 err = 0;
+
+	/* Reschedule the timer */
+	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+	iscan->timer_on = 1;
+
+	return err;
+}
+
+static s32 wl_iscan_inprogress(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl->iscan;
+	s32 err = 0;
+
+	rtnl_lock();
+	wl_inform_bss(wl);
+	wl_run_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+	rtnl_unlock();
+	/* Reschedule the timer */
+	mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+	iscan->timer_on = 1;
+
+	return err;
+}
+
+static s32 wl_iscan_aborted(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl->iscan;
+	s32 err = 0;
+
+	iscan->state = WL_ISCAN_STATE_IDLE;
+	rtnl_lock();
+	wl_notify_iscan_complete(iscan, true);
+	rtnl_unlock();
+
+	return err;
+}
+
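+/* iscan worker thread: runs as SCHED_FIFO, waits on iscan->sync, pulls the
+ * partial results from the dongle and dispatches to the handler matching
+ * the returned scan status (done/partial/pending/aborted).
+ */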
+static s32 wl_iscan_thread(void *data)
+{
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
+	struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+	struct wl_priv *wl = iscan_to_wl(iscan);
+	u32 status;
+	int err = 0;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	allow_signal(SIGTERM);
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (likely(!down_interruptible(&iscan->sync))) {
+		if (kthread_should_stop())
+			break;
+		if (iscan->timer_on) {
+			del_timer_sync(&iscan->timer);
+			iscan->timer_on = 0;
+		}
+		rtnl_lock();
+		err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
+		if (unlikely(err)) {
+			status = WL_SCAN_RESULTS_ABORTED;
+			WL_ERR(("Abort iscan\n"));
+		}
+		rtnl_unlock();
+		iscan->iscan_handler[status] (wl);
+	}
+	if (iscan->timer_on) {
+		del_timer_sync(&iscan->timer);
+		iscan->timer_on = 0;
+	}
+	WL_DBG(("%s was terminated\n", __func__));
+
+	return 0;
+}
+
+static void wl_iscan_timer(unsigned long data)
+{
+	struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+
+	if (iscan) {
+		iscan->timer_on = 0;
+		WL_DBG(("timer expired\n"));
+		wl_wakeup_iscan(iscan);
+	}
+}
+
+static s32 wl_invoke_iscan(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+	int err = 0;
+
+	if (wl->iscan_on && !iscan->tsk) {
+		iscan->state = WL_ISCAN_STATE_IDLE;
+		sema_init(&iscan->sync, 0);
+		iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+		if (IS_ERR(iscan->tsk)) {
+			WL_ERR(("Could not create iscan thread\n"));
+			iscan->tsk = NULL;
+			return -ENOMEM;
+		}
+	}
+
+	return err;
+}
+
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan)
+{
+	memset(iscan->iscan_handler, 0, sizeof(iscan->iscan_handler));
+	iscan->iscan_handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done;
+	iscan->iscan_handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress;
+	iscan->iscan_handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending;
+	iscan->iscan_handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted;
+	iscan->iscan_handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted;
+}
+
+static void wl_notify_escan_complete(struct wl_priv *wl, bool aborted)
+{
+	WL_DBG(("Enter \n"));
+	if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
+		WL_ERR(("Scan complete while device not scanning\n"));
+		return;
+	}
+	if (p2p_on(wl))
+		wl_clr_p2p_status(wl, SCANNING);
+
+	if (likely(wl->scan_request)) {
+		cfg80211_scan_done(wl->scan_request, aborted);
+		wl->scan_request = NULL;
+	}
+}
+
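+/* WLC_E_ESCAN_RESULT event handler: accumulates partial results into
+ * escan_buf, merging duplicate BSS entries (same BSSID/band/SSID) while
+ * preferring the on-channel RSSI measurement, and completes the cfg80211
+ * scan request on WLC_E_STATUS_SUCCESS or WLC_E_STATUS_ABORT.
+ */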
+static s32 wl_escan_handler(struct wl_priv *wl,
+	struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_bss_info_t *bi;
+	wl_escan_result_t *escan_result;
+	wl_bss_info_t *bss = NULL;
+	wl_scan_results_t *list;
+	u32 bi_length;
+	u32 i;
+	WL_DBG((" enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+	if (!wl->escan_on &&
+		!test_bit(WL_STATUS_SCANNING, &wl->status)) {
+		WL_ERR(("escan is not ready \n"));
+		return err;
+	}
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_INFO(("WLC_E_STATUS_PARTIAL \n"));
+		escan_result = (wl_escan_result_t *) data;
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+		list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+		if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+			WL_ERR(("Buffer is too small: ignoring\n"));
+			goto exit;
+		}
+#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
+		for (i = 0; i < list->count; i++) {
+			bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+				: list->bss_info;
+
+			if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+				CHSPEC_BAND(bi->chanspec) == CHSPEC_BAND(bss->chanspec) &&
+				bi->SSID_len == bss->SSID_len &&
+				!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+				if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
+					(bi->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+					/* preserve max RSSI if the measurements are
+					 * both on-channel or both off-channel
+					 */
+					bss->RSSI = MAX(bss->RSSI, bi->RSSI);
+				} else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
+					(bi->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+					/* preserve the on-channel rssi measurement
+					 * if the new measurement is off channel
+					 */
+					bss->RSSI = bi->RSSI;
+					bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+				}
+
+				goto exit;
+			}
+		}
+		memcpy(&(wl->escan_info.escan_buf[list->buflen]), bi, bi_length);
+		list->version = dtoh32(bi->version);
+		list->buflen += bi_length;
+		list->count++;
+
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		if (likely(wl->scan_request)) {
+			rtnl_lock();
+			WL_INFO(("ESCAN COMPLETED\n"));
+			wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+			wl_inform_bss(wl);
+			wl_notify_escan_complete(wl, false);
+			rtnl_unlock();
+		}
+	}
+	else if (status == WLC_E_STATUS_ABORT) {
+		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		if (likely(wl->scan_request)) {
+			rtnl_lock();
+			WL_INFO(("ESCAN COMPLETED\n"));
+			wl_notify_escan_complete(wl, true);
+			rtnl_unlock();
+		}
+	}
+exit:
+	return err;
+}
+
+static s32 wl_init_iscan(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+	int err = 0;
+
+	if (wl->iscan_on) {
+		iscan->dev = wl_to_prmry_ndev(wl);
+		iscan->state = WL_ISCAN_STATE_IDLE;
+		wl_init_iscan_handler(iscan);
+		iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
+		init_timer(&iscan->timer);
+		iscan->timer.data = (unsigned long) iscan;
+		iscan->timer.function = wl_iscan_timer;
+		sema_init(&iscan->sync, 0);
+		iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+		if (IS_ERR(iscan->tsk)) {
+			WL_ERR(("Could not create iscan thread\n"));
+			iscan->tsk = NULL;
+			return -ENOMEM;
+		}
+		iscan->data = wl;
+	} else if (wl->escan_on) {
+		wl->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	}
+
+	return err;
+}
+
+static void wl_init_fw(struct wl_fw_ctrl *fw)
+{
+	fw->status = 0;
+}
+
+static s32 wl_init_priv(struct wl_priv *wl)
+{
+	struct wiphy *wiphy = wl_to_wiphy(wl);
+	s32 err = 0;
+	s32 i = 0;
+
+	wl->scan_request = NULL;
+	wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+	wl->iscan_on = false;
+	wl->escan_on = true;
+	wl->roam_on = false;
+	wl->iscan_kickstart = false;
+	wl->active_scan = true;
+	wl->dongle_up = false;
+	wl->rf_blocked = false;
+
+	for (i = 0; i < VWDEV_CNT; i++)
+		wl->vwdev[i] = NULL;
+
+	init_waitqueue_head(&wl->dongle_event_wait);
+	wl_init_eq(wl);
+	err = wl_init_priv_mem(wl);
+	if (unlikely(err))
+		return err;
+	if (unlikely(wl_create_event_handler(wl)))
+		return -ENOMEM;
+	wl_init_event_handler(wl);
+	mutex_init(&wl->usr_sync);
+	err = wl_init_iscan(wl);
+	if (unlikely(err))
+		return err;
+	wl_init_fw(wl->fw);
+	wl_init_conf(wl->conf);
+	wl_init_prof(wl->profile);
+	wl_link_down(wl);
+	wl_cfgp2p_init_priv(wl);
+
+	return err;
+}
+
+static void wl_deinit_priv(struct wl_priv *wl)
+{
+	wl_destroy_event_handler(wl);
+	wl->dongle_up = false;	/* dongle down */
+	wl_flush_eq(wl);
+	wl_link_down(wl);
+	wl_term_iscan(wl);
+	wl_deinit_priv_mem(wl);
+}
+
+#ifdef CONFIG_SYSCTL
+s32 wl_cfg80211_sysctl_export_devaddr(void *data)
+{
+	/* Export the p2p_dev_addr via sysctl interface
+	 * so that wpa_supplicant can access it
+	 */
+	dhd_pub_t *dhd = (dhd_pub_t *)data;
+	struct wl_priv *wl = WL_PRIV_GET();
+
+	wl_cfgp2p_generate_bss_mac(&dhd->mac, &wl->p2p.dev_addr, &wl->p2p.int_addr);
+
+	sprintf((char *)&wl_sysctl_macstring[0], MACSTR, MAC2STR(wl->p2p.dev_addr.octet));
+	sprintf((char *)&wl_sysctl_macstring[1], MACSTR, MAC2STR(wl->p2p.int_addr.octet));
+
+	return 0;
+}
+#endif /* CONFIG_SYSCTL */
+
+s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+	struct wl_priv * wl = NULL;
+	s32 err = 0;
+	WL_TRACE(("In\n"));
+	if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	wl = WL_PRIV_GET();
+	if (wl && !test_bit(WL_STATUS_READY, &wl->status)) {
+			if (wl->wdev &&
+				wl_cfgp2p_supported(wl, ndev)) {
+				WL_INFO(("P2P is supported on Firmware \n"));
+				wl->wdev->wiphy->interface_modes |=
+					(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO));
+			}
+	} else
+		return -ENODEV;
+
+	set_bit(WL_STATUS_READY, &wl->status);
+	return err;
+}
+s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
+{
+	struct wireless_dev *wdev;
+	struct wl_priv *wl;
+	struct wl_iface *ci;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	wl_cfg80211_dev = kzalloc(sizeof(struct wl_dev), GFP_KERNEL);
+	if (unlikely(!wl_cfg80211_dev)) {
+		WL_ERR(("Failed to allocate wl_cfg80211_dev\n"));
+		return -ENOMEM;
+	}
+	WL_DBG(("func %p\n", wl_cfg80211_get_sdio_func()));
+	wdev = wl_alloc_wdev(sizeof(struct wl_iface), &wl_cfg80211_get_sdio_func()->dev);
+	if (unlikely(IS_ERR(wdev)))
+		return -ENOMEM;
+
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+	wl = wdev_to_wl(wdev);
+	wl->wdev = wdev;
+	wl->pub = data;
+
+	ci = (struct wl_iface *)wl_to_ci(wl);
+	ci->wl = wl;
+	ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+	wdev->netdev = ndev;
+
+	err = wl_init_priv(wl);
+	if (unlikely(err)) {
+		WL_ERR(("Failed to init wl_priv (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+
+	err = wl_setup_rfkill(wl);
+	if (unlikely(err)) {
+		WL_ERR(("Failed to setup rfkill %d\n", err));
+		goto cfg80211_attach_out;
+	}
+
+#ifdef CONFIG_SYSCTL
+	if (!(wl_sysctl_hdr = register_sysctl_table(wl_sysctl_table))) {
+		WL_ERR(("%s: sysctl register failed!! \n", __func__));
+		err = -ENOMEM;
+		goto cfg80211_attach_out;
+	}
+#endif
+	wl_set_drvdata(wl_cfg80211_dev, ci);
+	return err;
+
+cfg80211_attach_out:
+	wl_free_wdev(wl);
+	return err;
+}
+
+void wl_cfg80211_detach(void)
+{
+	struct wl_priv *wl;
+
+	wl = WL_PRIV_GET();
+
+	WL_TRACE(("In\n"));
+
+#ifdef CONFIG_SYSCTL
+	if (wl_sysctl_hdr)
+		unregister_sysctl_table(wl_sysctl_hdr);
+#endif
+	rfkill_unregister(wl->rfkill);
+	rfkill_destroy(wl->rfkill);
+
+	wl_deinit_priv(wl);
+	wl_free_wdev(wl);
+	wl_set_drvdata(wl_cfg80211_dev, NULL);
+	kfree(wl_cfg80211_dev);
+	wl_cfg80211_dev = NULL;
+	wl_clear_sdio_func();
+}
+
+static void wl_wakeup_event(struct wl_priv *wl)
+{
+	up(&wl->event_sync);
+}
+
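+/* Main event thread: waits on event_sync, dequeues firmware events and
+ * dispatches each to the registered handler in wl->evt_handler[], resolving
+ * the target net_device from the event's interface index.
+ */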
+static s32 wl_event_handler(void *data)
+{
+	struct net_device *netdev;
+	struct wl_priv *wl = (struct wl_priv *)data;
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
+	struct wl_event_q *e;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	allow_signal(SIGTERM);
+	while (likely(!down_interruptible(&wl->event_sync))) {
+		if (kthread_should_stop())
+			break;
+		e = wl_deq_event(wl);
+		if (unlikely(!e)) {
+			WL_ERR(("event queue empty..\n"));
+			return 0;
+		}
+		WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
+		netdev = dhd_idx2net((struct dhd_pub *)(wl->pub), e->emsg.ifidx);
+		if (!netdev)
+			netdev = wl_to_prmry_ndev(wl);
+		if (wl->evt_handler[e->etype]) {
+			wl->evt_handler[e->etype] (wl, netdev, &e->emsg, e->edata);
+		} else {
+			WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+		}
+		wl_put_event(e);
+	}
+	WL_DBG(("%s was terminated\n", __func__));
+	return 0;
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+	u32 event_type = ntoh32(e->event_type);
+	struct wl_priv *wl = WL_PRIV_GET();
+
+#if (WL_DBG_LEVEL > 0)
+	s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
+	    wl_dbg_estr[event_type] : (s8 *) "Unknown";
+	WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+#endif /* (WL_DBG_LEVEL > 0) */
+
+	if (likely(!wl_enq_event(wl, ndev, event_type, e, data)))
+		wl_wakeup_event(wl);
+}
+
+static void wl_init_eq(struct wl_priv *wl)
+{
+	wl_init_eq_lock(wl);
+	INIT_LIST_HEAD(&wl->eq_list);
+}
+
+static void wl_flush_eq(struct wl_priv *wl)
+{
+	struct wl_event_q *e;
+
+	wl_lock_eq(wl);
+	while (!list_empty(&wl->eq_list)) {
+		e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+		kfree(e);
+	}
+	wl_unlock_eq(wl);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl)
+{
+	struct wl_event_q *e = NULL;
+
+	wl_lock_eq(wl);
+	if (likely(!list_empty(&wl->eq_list))) {
+		e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+	}
+	wl_unlock_eq(wl);
+
+	return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 event, const wl_event_msg_t *msg,
+	void *data)
+{
+	struct wl_event_q *e;
+	s32 err = 0;
+	uint32 evtq_size;
+	uint32 data_len;
+
+	data_len = 0;
+	if (data)
+		data_len = ntoh32(msg->datalen);
+	evtq_size = sizeof(struct wl_event_q) + data_len;
+	e = kzalloc(evtq_size, GFP_ATOMIC);
+	if (unlikely(!e)) {
+		WL_ERR(("event alloc failed\n"));
+		return -ENOMEM;
+	}
+	e->etype = event;
+	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+	if (data)
+		memcpy(e->edata, data, data_len);
+	wl_lock_eq(wl);
+	list_add_tail(&e->eq_list, &wl->eq_list);
+	wl_unlock_eq(wl);
+
+	return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+	kfree(e);
+}
+
+/* TODO: this has been changed to wl_cfg80211_set_sdio_func in FALCON
+ * Keep it as-is in P2P Twig
+ */
+void wl_cfg80211_set_sdio_func(void *func)
+{
+	cfg80211_sdio_func = (struct sdio_func *)func;
+}
+
+static void wl_clear_sdio_func(void)
+{
+	cfg80211_sdio_func = NULL;
+}
+
+struct sdio_func *wl_cfg80211_get_sdio_func(void)
+{
+	return cfg80211_sdio_func;
+}
+
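+/* Map the nl80211 interface type to a driver mode, program WLC_SET_INFRA
+ * accordingly and record the mode for this net_device.
+ */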
+static s32 wl_dongle_mode(struct wl_priv *wl, struct net_device *ndev, s32 iftype)
+{
+	s32 infra = 0;
+	s32 err = 0;
+	s32 mode = 0;
+	switch (iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+		WL_ERR(("type (%d) : currently we do not support this mode\n",
+			iftype));
+		err = -EINVAL;
+		return err;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		break;
+	case NL80211_IFTYPE_STATION:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	default:
+		err = -EINVAL;
+		WL_ERR(("invalid type (%d)\n", iftype));
+		return err;
+	}
+	infra = htod32(infra);
+	err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		return err;
+	}
+
+	set_mode_by_netdev(wl, ndev, mode);
+
+	return 0;
+}
+
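+/* Read the firmware "event_msgs" bitmask, set the events this driver needs
+ * (association, link, scan, P2P action frames, escan results, ...) and
+ * write the mask back.
+ */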
+static s32 wl_dongle_eventmsg(struct net_device *ndev)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	s32 err = 0;
+
+	/* Setup event_msgs */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("Get event_msgs error (%d)\n", err));
+		goto dongle_eventmsg_out;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+
+	setbit(eventmask, WLC_E_SET_SSID);
+	setbit(eventmask, WLC_E_PRUNE);
+	setbit(eventmask, WLC_E_AUTH);
+	setbit(eventmask, WLC_E_REASSOC);
+	setbit(eventmask, WLC_E_REASSOC_IND);
+	setbit(eventmask, WLC_E_DEAUTH_IND);
+	setbit(eventmask, WLC_E_DEAUTH);
+	setbit(eventmask, WLC_E_DISASSOC_IND);
+	setbit(eventmask, WLC_E_DISASSOC);
+	setbit(eventmask, WLC_E_JOIN);
+	setbit(eventmask, WLC_E_ASSOC_IND);
+	setbit(eventmask, WLC_E_PSK_SUP);
+	setbit(eventmask, WLC_E_LINK);
+	setbit(eventmask, WLC_E_NDIS_LINK);
+	setbit(eventmask, WLC_E_MIC_ERROR);
+	setbit(eventmask, WLC_E_PMKID_CACHE);
+	setbit(eventmask, WLC_E_TXFAIL);
+	setbit(eventmask, WLC_E_JOIN_START);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+	setbit(eventmask, WLC_E_ACTION_FRAME_COMPLETE);
+	setbit(eventmask, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE);
+	setbit(eventmask, WLC_E_P2P_PROBREQ_MSG);
+	setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	setbit(eventmask, WLC_E_ESCAN_RESULT);
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("Set event_msgs error (%d)\n", err));
+		goto dongle_eventmsg_out;
+	}
+
+dongle_eventmsg_out:
+	return err;
+}
+
+#ifndef EMBEDDED_PLATFORM
+static s32 wl_dongle_country(struct net_device *ndev, u8 ccode)
+{
+
+	s32 err = 0;
+
+	return err;
+}
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up)
+{
+	s32 err = 0;
+
+	err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_UP error (%d)\n", err));
+	}
+	return err;
+}
+
+static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode)
+{
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	err = wldev_ioctl(ndev, WLC_SET_PM, &power_mode, sizeof(power_mode), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_PM error (%d)\n", err));
+	}
+	return err;
+}
+
+static s32
+wl_dongle_glom(struct net_device *ndev, u32 glom, u32 dongle_align)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	s32 err = 0;
+
+	/* Match Host and Dongle rx alignment */
+	bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("txglomalign error (%d)\n", err));
+		goto dongle_glom_out;
+	}
+	/* disable glom option per default */
+	bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("txglom error (%d)\n", err));
+		goto dongle_glom_out;
+	}
+dongle_glom_out:
+	return err;
+}
+
+static s32
+wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	s32 err = 0;
+
+	/* Setup timeout if Beacons are lost and roam is off to report link down */
+	if (roamvar) {
+		bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
+			sizeof(iovbuf));
+		err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+		if (unlikely(err)) {
+			WL_ERR(("bcn_timeout error (%d)\n", err));
+			goto dongle_rom_out;
+		}
+	}
+	/* Enable/Disable built-in roaming to allow supplicant to take care of roaming */
+	bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("roam_off error (%d)\n", err));
+		goto dongle_rom_out;
+	}
+dongle_rom_out:
+	return err;
+}
+
+static s32
+wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+	s32 scan_unassoc_time)
+{
+	s32 err = 0;
+
+	err = wldev_ioctl(ndev, WLC_SET_SCAN_CHANNEL_TIME, &scan_assoc_time,
+		sizeof(scan_assoc_time), FALSE);
+	if (err) {
+		if (err == -EOPNOTSUPP) {
+			WL_INFO(("Scan assoc time is not supported\n"));
+		} else {
+			WL_ERR(("Scan assoc time error (%d)\n", err));
+		}
+		goto dongle_scantime_out;
+	}
+	err = wldev_ioctl(ndev, WLC_SET_SCAN_UNASSOC_TIME, &scan_unassoc_time,
+		sizeof(scan_unassoc_time), FALSE);
+	if (err) {
+		if (err == -EOPNOTSUPP) {
+			WL_INFO(("Scan unassoc time is not supported\n"));
+		} else {
+			WL_ERR(("Scan unassoc time error (%d)\n", err));
+		}
+		goto dongle_scantime_out;
+	}
+
+dongle_scantime_out:
+	return err;
+}
+
+static s32
+wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol)
+{
+	/* Room for "event_msgs" + '\0' + bitvec */
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	s32 err = 0;
+
+	/* Set ARP offload */
+	bcm_mkiovar("arpoe", (char *)&arpoe, 4, iovbuf, sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			WL_INFO(("arpoe is not supported\n"));
+		else
+			WL_ERR(("arpoe error (%d)\n", err));
+
+		goto dongle_offload_out;
+	}
+	bcm_mkiovar("arp_ol", (char *)&arp_ol, 4, iovbuf, sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			WL_INFO(("arp_ol is not supported\n"));
+		else
+			WL_ERR(("arp_ol error (%d)\n", err));
+
+		goto dongle_offload_out;
+	}
+
+dongle_offload_out:
+	return err;
+}
+
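+/* Convert a hex pattern string such as "0xff00" into raw bytes in dst.
+ * Returns the number of bytes written, or -1 on a malformed string.
+ */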
+static s32 wl_pattern_atoh(s8 *src, s8 *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
+		WL_ERR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2;		/* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		WL_ERR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		strncpy(num, src, 2);
+		num[2] = '\0';
+		dst[i] = (u8) simple_strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+
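+/* Install a default single-byte packet filter via "pkt_filter_add" and
+ * select the filtering behaviour with "pkt_filter_mode".
+ */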
+static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode)
+{
+	/* Room for "event_msgs" + '\0' + bitvec */
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	const s8 *str;
+	struct wl_pkt_filter pkt_filter;
+	struct wl_pkt_filter *pkt_filterp;
+	s32 buf_len;
+	s32 str_len;
+	u32 mask_size;
+	u32 pattern_size;
+	s8 buf[256];
+	s32 err = 0;
+
+	/* add a default packet filter pattern */
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[str_len] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (struct wl_pkt_filter *)(buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(100);
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(0);
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(0);
+
+	/* Parse pattern filter offset. */
+	pkt_filter.u.pattern.offset = htod32(0);
+
+	/* Parse pattern filter mask. */
+	mask_size = htod32(wl_pattern_atoh("0xff",
+		(char *)pkt_filterp->u.pattern.
+		    mask_and_pattern));
+
+	/* Parse pattern filter pattern. */
+	pattern_size = htod32(wl_pattern_atoh("0x00",
+		(char *)&pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+	if (mask_size != pattern_size) {
+		WL_ERR(("Mask and pattern not the same size\n"));
+		err = -EINVAL;
+		goto dongle_filter_out;
+	}
+
+	pkt_filter.u.pattern.size_bytes = mask_size;
+	buf_len += WL_PKT_FILTER_FIXED_LEN;
+	buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+	/* Filter attributes are set in a local
+	 * variable (pkt_filter), and
+	 * then memcpy'ed into the buffer (pkt_filterp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
+	memcpy((char *)pkt_filterp, &pkt_filter,
+		WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+	err = wldev_ioctl(ndev, WLC_SET_VAR, buf, buf_len, FALSE);
+	if (err) {
+		if (err == -EOPNOTSUPP) {
+			WL_INFO(("filter not supported\n"));
+		} else {
+			WL_ERR(("filter (%d)\n", err));
+		}
+		goto dongle_filter_out;
+	}
+
+	/* set mode to allow pattern */
+	bcm_mkiovar("pkt_filter_mode", (char *)&filter_mode, 4, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE);
+	if (err) {
+		if (err == -EOPNOTSUPP) {
+			WL_INFO(("filter_mode not supported\n"));
+		} else {
+			WL_ERR(("filter_mode (%d)\n", err));
+		}
+		goto dongle_filter_out;
+	}
+
+dongle_filter_out:
+	return err;
+}
+#endif				/* !EMBEDDED_PLATFORM */
+
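+/* Push the default configuration to the dongle once after it comes up:
+ * the event mask and, on non-embedded builds, power mode, roaming, scan
+ * times, ARP offload and a packet filter, followed by the operating mode
+ * of the primary interface.
+ */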
+s32 wl_config_dongle(struct wl_priv *wl, bool need_lock)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+	struct net_device *ndev;
+	struct wireless_dev *wdev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (wl->dongle_up)
+		return err;
+
+	ndev = wl_to_prmry_ndev(wl);
+	wdev = ndev->ieee80211_ptr;
+	if (need_lock)
+		rtnl_lock();
+	err = wl_dongle_eventmsg(ndev);
+	if (unlikely(err))
+		goto default_conf_out;
+#ifndef EMBEDDED_PLATFORM
+	err = wl_dongle_up(ndev, 0);
+	if (unlikely(err))
+		goto default_conf_out;
+	err = wl_dongle_country(ndev, 0);
+	if (unlikely(err))
+		goto default_conf_out;
+	err = wl_dongle_power(ndev, PM_FAST);
+	if (unlikely(err))
+		goto default_conf_out;
+	err = wl_dongle_glom(ndev, 0, DHD_SDALIGN);
+	if (unlikely(err))
+		goto default_conf_out;
+	err = wl_dongle_roam(ndev, (wl->roam_on ? 0 : 1), 3);
+	if (unlikely(err))
+		goto default_conf_out;
+	err = wl_dongle_eventmsg(ndev);
+	if (unlikely(err))
+		goto default_conf_out;
+
+	wl_dongle_scantime(ndev, 40, 80);
+	wl_dongle_offload(ndev, 1, 0xf);
+	wl_dongle_filter(ndev, 1);
+#endif				/* !EMBEDDED_PLATFORM */
+
+	err = wl_dongle_mode(wl, ndev, wdev->iftype);
+	if (unlikely(err && err != -EINPROGRESS))
+		goto default_conf_out;
+	err = wl_dongle_probecap(wl);
+	if (unlikely(err))
+		goto default_conf_out;
+
+	/* -EINPROGRESS: Call commit handler */
+
+default_conf_out:
+	if (need_lock)
+		rtnl_unlock();
+
+	wl->dongle_up = true;
+
+	return err;
+
+}
+
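+/* Query the dongle's PHY list and register the 5 GHz band with cfg80211
+ * when an 'a' or 'n' PHY is present.
+ */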
+static s32 wl_update_wiphybands(struct wl_priv *wl)
+{
+	struct wiphy *wiphy;
+	s32 phy_list;
+	s8 phy;
+	s32 err = 0;
+
+	err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_PHYLIST, &phy_list,
+		sizeof(phy_list), FALSE);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	phy = ((char *)&phy_list)[1];
+	WL_DBG(("%c phy\n", phy));
+	if (phy == 'n' || phy == 'a') {
+		wiphy = wl_to_wiphy(wl);
+		wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
+	}
+
+	return err;
+}
+
+static s32 __wl_cfg80211_up(struct wl_priv *wl)
+{
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	wl_debugfs_add_netdev_params(wl);
+
+	err = wl_config_dongle(wl, false);
+	if (unlikely(err))
+		return err;
+
+	wl_invoke_iscan(wl);
+	set_bit(WL_STATUS_READY, &wl->status);
+	return err;
+}
+
+static s32 __wl_cfg80211_down(struct wl_priv *wl)
+{
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	/* Check if cfg80211 interface is already down */
+	if (!test_bit(WL_STATUS_READY, &wl->status))
+		return err;	/* it is even not ready */
+
+	set_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
+	wl_term_iscan(wl);
+	if (wl->scan_request) {
+		cfg80211_scan_done(wl->scan_request, true);
+
+		wl->scan_request = NULL;
+	}
+	clear_bit(WL_STATUS_READY, &wl->status);
+	clear_bit(WL_STATUS_SCANNING, &wl->status);
+	clear_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
+	clear_bit(WL_STATUS_CONNECTED, &wl->status);
+
+	wl->dongle_up = false;
+	wl_flush_eq(wl);
+	wl_link_down(wl);
+	wl_cfgp2p_down(wl);
+	wl_debugfs_remove_netdev(wl);
+
+	return err;
+}
+
+s32 wl_cfg80211_up(void)
+{
+	struct wl_priv *wl;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	wl = WL_PRIV_GET();
+	mutex_lock(&wl->usr_sync);
+	wl_cfg80211_attach_post(wl_to_prmry_ndev(wl));
+	err = __wl_cfg80211_up(wl);
+	mutex_unlock(&wl->usr_sync);
+
+	return err;
+}
+
+s32 wl_cfg80211_down(void)
+{
+	struct wl_priv *wl;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	wl = WL_PRIV_GET();
+	mutex_lock(&wl->usr_sync);
+	err = __wl_cfg80211_down(wl);
+	mutex_unlock(&wl->usr_sync);
+
+	return err;
+}
+
+static s32 wl_dongle_probecap(struct wl_priv *wl)
+{
+	s32 err = 0;
+
+	err = wl_update_wiphybands(wl);
+	if (unlikely(err))
+		return err;
+
+	return err;
+}
+
+static void *wl_read_prof(struct wl_priv *wl, s32 item)
+{
+	switch (item) {
+	case WL_PROF_SEC:
+		return &wl->profile->sec;
+	case WL_PROF_ACT:
+		return &wl->profile->active;
+	case WL_PROF_BSSID:
+		return &wl->profile->bssid;
+	case WL_PROF_SSID:
+		return &wl->profile->ssid;
+	}
+	WL_ERR(("invalid item (%d)\n", item));
+	return NULL;
+}
+
+static s32
+wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e, void *data,
+	s32 item)
+{
+	s32 err = 0;
+	struct wlc_ssid *ssid;
+
+	switch (item) {
+	case WL_PROF_SSID:
+		ssid = (wlc_ssid_t *) data;
+		memset(wl->profile->ssid.SSID, 0,
+			sizeof(wl->profile->ssid.SSID));
+		memcpy(wl->profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
+		wl->profile->ssid.SSID_len = ssid->SSID_len;
+		break;
+	case WL_PROF_BSSID:
+		if (data)
+			memcpy(wl->profile->bssid, data, ETHER_ADDR_LEN);
+		else
+			memset(wl->profile->bssid, 0, ETHER_ADDR_LEN);
+		break;
+	case WL_PROF_SEC:
+		memcpy(&wl->profile->sec, data, sizeof(wl->profile->sec));
+		break;
+	case WL_PROF_ACT:
+		wl->profile->active = *(bool *)data;
+		break;
+	case WL_PROF_BEACONINT:
+		wl->profile->beacon_interval = *(u16 *)data;
+		break;
+	case WL_PROF_DTIMPERIOD:
+		wl->profile->dtim_period = *(u8 *)data;
+		break;
+	default:
+		WL_ERR(("unsupported item (%d)\n", item));
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+	/*
+	 * Changing the debug level via an insmod parameter is prohibited.
+	 * Eventually the debug level will be configured at compile time
+	 * via CONFIG_XXX.
+	 */
+	/* wl_dbg_level = level; */
+}
+
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev)
+{
+	return get_mode_by_netdev(wl, ndev) == WL_MODE_IBSS;
+}
+
+static __used bool wl_is_ibssstarter(struct wl_priv *wl)
+{
+	return wl->ibss_starter;
+}
+
+static void wl_rst_ie(struct wl_priv *wl)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+
+	ie->offset = 0;
+}
+
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ie crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	ie->buf[ie->offset] = t;
+	ie->buf[ie->offset + 1] = l;
+	memcpy(&ie->buf[ie->offset + 2], v, l);
+	ie->offset += l + 2;
+
+	return err;
+}
+
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ie_stream crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+	ie->offset += ie_size;
+
+	return err;
+}
+
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+	s32 err = 0;
+
+	if (unlikely(ie->offset > dst_size)) {
+		WL_ERR(("dst_size is not enough\n"));
+		return -ENOSPC;
+	}
+	memcpy(dst, &ie->buf[0], ie->offset);
+
+	return err;
+}
+
+static u32 wl_get_ielen(struct wl_priv *wl)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+
+	return ie->offset;
+}
+
+static void wl_link_up(struct wl_priv *wl)
+{
+	wl->link_up = true;
+}
+
+static void wl_link_down(struct wl_priv *wl)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+
+	WL_DBG(("In\n"));
+	wl->link_up = false;
+	kfree(conn_info->req_ie);
+	conn_info->req_ie = NULL;
+	conn_info->req_ie_len = 0;
+	kfree(conn_info->resp_ie);
+	conn_info->resp_ie = NULL;
+	conn_info->resp_ie_len = 0;
+}
+
+static void wl_lock_eq(struct wl_priv *wl)
+{
+	spin_lock_irq(&wl->eq_lock);
+}
+
+static void wl_unlock_eq(struct wl_priv *wl)
+{
+	spin_unlock_irq(&wl->eq_lock);
+}
+
+static void wl_init_eq_lock(struct wl_priv *wl)
+{
+	spin_lock_init(&wl->eq_lock);
+}
+
+static void wl_delay(u32 ms)
+{
+	if (ms < 1000 / HZ) {
+		cond_resched();
+		mdelay(ms);
+	} else {
+		msleep(ms);
+	}
+}
+
+static void wl_set_drvdata(struct wl_dev *dev, void *data)
+{
+	dev->driver_data = data;
+}
+
+static void *wl_get_drvdata(struct wl_dev *dev)
+{
+	return dev->driver_data;
+}
+
+s32 wl_cfg80211_read_fw(s8 *buf, u32 size)
+{
+	const struct firmware *fw_entry;
+	struct wl_priv *wl;
+
+	wl = WL_PRIV_GET();
+
+	fw_entry = wl->fw->fw_entry;
+
+	if (fw_entry->size < wl->fw->ptr + size)
+		size = fw_entry->size - wl->fw->ptr;
+
+	memcpy(buf, &fw_entry->data[wl->fw->ptr], size);
+	wl->fw->ptr += size;
+	return size;
+}
+
+void wl_cfg80211_release_fw(void)
+{
+	struct wl_priv *wl;
+
+	wl = WL_PRIV_GET();
+	release_firmware(wl->fw->fw_entry);
+	wl->fw->ptr = 0;
+}
+
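+/* Load firmware and nvram images through request_firmware() against the
+ * SDIO function's device: the first call fetches the firmware, the second
+ * the nvram. Returns a pointer to the image data, or NULL on failure.
+ */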
+void *wl_cfg80211_request_fw(s8 *file_name)
+{
+	struct wl_priv *wl;
+	const struct firmware *fw_entry = NULL;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	WL_DBG(("file name : \"%s\"\n", file_name));
+	wl = WL_PRIV_GET();
+
+	if (!test_bit(WL_FW_LOADING_DONE, &wl->fw->status)) {
+		err = request_firmware(&wl->fw->fw_entry, file_name,
+			&wl_cfg80211_get_sdio_func()->dev);
+		if (unlikely(err)) {
+			WL_ERR(("Could not download fw (%d)\n", err));
+			goto req_fw_out;
+		}
+		set_bit(WL_FW_LOADING_DONE, &wl->fw->status);
+		fw_entry = wl->fw->fw_entry;
+		if (fw_entry) {
+			WL_DBG(("fw size (%zd), data (%p)\n", fw_entry->size,
+				fw_entry->data));
+		}
+	} else if (!test_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status)) {
+		err = request_firmware(&wl->fw->fw_entry, file_name,
+			&wl_cfg80211_get_sdio_func()->dev);
+		if (unlikely(err)) {
+			WL_ERR(("Could not download nvram (%d)\n", err));
+			goto req_fw_out;
+		}
+		set_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status);
+		fw_entry = wl->fw->fw_entry;
+		if (fw_entry) {
+			WL_DBG(("nvram size (%zd), data (%p)\n", fw_entry->size,
+				fw_entry->data));
+		}
+	} else {
+		WL_DBG(("Downloading already done. Nothing more to do\n"));
+		err = -EPERM;
+	}
+
+req_fw_out:
+	if (unlikely(err)) {
+		return NULL;
+	}
+	wl->fw->ptr = 0;
+	return (void *)fw_entry->data;
+}
+
+s8 *wl_cfg80211_get_fwname(void)
+{
+	struct wl_priv *wl;
+
+	wl = WL_PRIV_GET();
+	strcpy(wl->fw->fw_name, WL_4329_FW_FILE);
+	return wl->fw->fw_name;
+}
+
+s8 *wl_cfg80211_get_nvramname(void)
+{
+	struct wl_priv *wl;
+
+	wl = WL_PRIV_GET();
+	strcpy(wl->fw->nvram_name, WL_4329_NVRAM_FILE);
+	return wl->fw->nvram_name;
+}
+
+static int wl_debugfs_add_netdev_params(struct wl_priv *wl)
+{
+	char buf[10+IFNAMSIZ];
+	struct dentry *fd;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	sprintf(buf, "netdev:%s", wl_to_prmry_ndev(wl)->name);
+	wl->debugfsdir = debugfs_create_dir(buf, wl_to_wiphy(wl)->debugfsdir);
+
+	fd = debugfs_create_u16("beacon_int", S_IRUGO, wl->debugfsdir,
+		(u16 *)&wl->profile->beacon_interval);
+	if (!fd) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	fd = debugfs_create_u8("dtim_period", S_IRUGO, wl->debugfsdir,
+		(u8 *)&wl->profile->dtim_period);
+	if (!fd) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+err_out:
+	return err;
+}
+
+static void wl_debugfs_remove_netdev(struct wl_priv *wl)
+{
+	WL_DBG(("Enter \n"));
+}
+
+static const struct rfkill_ops wl_rfkill_ops = {
+	.set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+	return 0;
+}
+
+static int wl_setup_rfkill(struct wl_priv *wl)
+{
+	s32 err;
+
+	WL_DBG(("Enter \n"));
+	wl->rfkill = rfkill_alloc("brcmfmac-wifi",
+		&wl_cfg80211_get_sdio_func()->dev,
+		RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)wl);
+
+	if (!wl->rfkill) {
+		err =  -ENOMEM;
+		goto err_out;
+	}
+
+	err = rfkill_register(wl->rfkill);
+
+	if (err)
+		rfkill_destroy(wl->rfkill);
+
+err_out:
+	return err;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
new file mode 100644
index 0000000..4c05987
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -0,0 +1,512 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.h,v 1.1.4.1.2.8 2011/02/09 01:37:52 Exp $
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <wl_cfgp2p.h>
+
+struct wl_conf;
+struct wl_iface;
+struct wl_priv;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#define WL_DBG_NONE	0
+#define WL_DBG_DBG 	(1 << 2)
+#define WL_DBG_INFO	(1 << 1)
+#define WL_DBG_ERR	(1 << 0)
+#define WL_DBG_MASK ((WL_DBG_DBG | WL_DBG_INFO | WL_DBG_ERR) << 1)
+
+/* 0 invalidates all debug messages.  default is 0xFF */
+#define WL_DBG_LEVEL 0xFF
+
+#define	WL_ERR(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_ERR "CFG80211-ERROR) %s : ", __func__);	\
+			printk args;						\
+		} 								\
+} while (0)
+#define	WL_INFO(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_ERR "CFG80211-INFO) %s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#if (WL_DBG_LEVEL > 0)
+#define	WL_DBG(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_DBG) {			\
+		printk(KERN_ERR "CFG80211-DEBUG) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#else				/* !(WL_DBG_LEVEL > 0) */
+#define	WL_DBG(args)
+#endif				/* (WL_DBG_LEVEL > 0) */
+
+#define WL_SCAN_RETRY_MAX	3	/* used for ibss scan */
+#define WL_NUM_SCAN_MAX		1
+#define WL_NUM_PMKIDS_MAX	MAXPMKID	/* will be used
+						 * for 2.6.33 kernel
+						 * or later
+						 */
+#define WL_SCAN_BUF_MAX 		(1024 * 8)
+#define WL_TLV_INFO_MAX 		1024
+#define WL_SCAN_IE_LEN_MAX      2048
+#define WL_BSS_INFO_MAX			2048
+#define WL_ASSOC_INFO_MAX	512	/*
+				 * needs to grab assoc info from dongle to
+				 * report it to cfg80211 through "connect"
+				 * event
+				 */
+#define WL_IOCTL_LEN_MAX	1024
+#define WL_EXTRA_BUF_MAX	2048
+#define WL_ISCAN_BUF_MAX	2048	/*
+				 * the buf length can be WLC_IOCTL_MAXLEN (8K)
+				 * to reduce iteration
+				 */
+#define WL_ISCAN_TIMER_INTERVAL_MS	3000
+#define WL_SCAN_ERSULTS_LAST 	(WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX	256	/* virtually unlimited as long
+				 * as kernel memory allows
+				 */
+#define WL_FILE_NAME_MAX		256
+#define WL_DWELL_TIME 	200
+
+/* WiFi Direct */
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN "11"
+#define VWDEV_CNT 3
+/* dongle status */
+enum wl_status {
+	WL_STATUS_READY = 0,
+	WL_STATUS_SCANNING,
+	WL_STATUS_SCAN_ABORTING,
+	WL_STATUS_CONNECTING,
+	WL_STATUS_CONNECTED
+};
+
+/* wi-fi mode */
+enum wl_mode {
+	WL_MODE_BSS,
+	WL_MODE_IBSS,
+	WL_MODE_AP
+};
+
+/* dongle profile list */
+enum wl_prof_list {
+	WL_PROF_MODE,
+	WL_PROF_SSID,
+	WL_PROF_SEC,
+	WL_PROF_IBSS,
+	WL_PROF_BAND,
+	WL_PROF_BSSID,
+	WL_PROF_ACT,
+	WL_PROF_BEACONINT,
+	WL_PROF_DTIMPERIOD
+};
+
+/* dongle iscan state */
+enum wl_iscan_state {
+	WL_ISCAN_STATE_IDLE,
+	WL_ISCAN_STATE_SCANING
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+	WL_ESCAN_STATE_IDLE,
+	WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+	WL_FW_LOADING_DONE,
+	WL_NVRAM_LOADING_DONE
+};
+
+/* beacon / probe_response */
+struct beacon_proberesp {
+	__le64 timestamp;
+	__le16 beacon_int;
+	__le16 capab_info;
+	u8 variable[0];
+} __attribute__ ((packed));
+
+/* dongle configuration */
+struct wl_conf {
+	struct net_mode {
+		struct net_device *ndev;
+		s32 type;
+	} mode[VWDEV_CNT + 1];		/* adhoc, infrastructure or ap */
+	u32 frag_threshold;
+	u32 rts_threshold;
+	u32 retry_short;
+	u32 retry_long;
+	s32 tx_power;
+	struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct wl_priv *wl,
+                            struct net_device *ndev, const wl_event_msg_t *e, void *data);
+
+/* representing interface of cfg80211 plane */
+struct wl_iface {
+	struct wl_priv *wl;
+};
+
+struct wl_dev {
+	void *driver_data;	/* to store cfg80211 object information */
+};
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+	u16 band;
+	u16 channel;
+	s16 rssi;
+	u16 frame_len;
+	u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+	struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+	u16 offset;
+	u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+	struct list_head eq_list;
+	u32 etype;
+	wl_event_msg_t emsg;
+	s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+	u32 wpa_versions;
+	u32 auth_type;
+	u32 cipher_pairwise;
+	u32 cipher_group;
+	u32 wpa_auth;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+	u8 beacon_interval;	/* in millisecond */
+	u8 atim;		/* in millisecond */
+	s8 join_only;
+	u8 band;
+	u8 channel;
+};
+
+/* dongle profile */
+struct wl_profile {
+	u32 mode;
+	struct wlc_ssid ssid;
+	u8 bssid[ETHER_ADDR_LEN];
+	u16 beacon_interval;
+	u8 dtim_period;
+	struct wl_security sec;
+	struct wl_ibss ibss;
+	s32 band;
+	bool active;
+};
+
+typedef s32(*ISCAN_HANDLER) (struct wl_priv *wl);
+
+/* dongle iscan controller */
+struct wl_iscan_ctrl {
+	struct net_device *dev;
+	struct timer_list timer;
+	u32 timer_ms;
+	u32 timer_on;
+	s32 state;
+	struct task_struct *tsk;
+	struct semaphore sync;
+	ISCAN_HANDLER iscan_handler[WL_SCAN_ERSULTS_LAST];
+	void *data;
+	s8 ioctl_buf[WLC_IOCTL_SMLEN];
+	s8 scan_buf[WL_ISCAN_BUF_MAX];
+};
+
+/* association inform */
+struct wl_connect_info {
+	u8 *req_ie;
+	s32 req_ie_len;
+	u8 *resp_ie;
+	s32 resp_ie_len;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+	const struct firmware *fw_entry;
+	unsigned long status;
+	u32 ptr;
+	s8 fw_name[WL_FILE_NAME_MAX];
+	s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+	u32 req_len;
+	u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID - 1];
+};
+
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+	u32 escan_state;
+	u8 escan_buf[ESCAN_BUF_SIZE];
+	struct wiphy *wiphy;
+};
+
+/* dongle private data of cfg80211 interface */
+struct wl_priv {
+	struct wireless_dev *wdev;	/* representing wl cfg80211 device */
+	struct wireless_dev *vwdev[VWDEV_CNT];
+	struct wl_conf *conf;	/* dongle configuration */
+	struct cfg80211_scan_request *scan_request;	/* scan request object */
+	EVENT_HANDLER evt_handler[WLC_E_LAST];
+	struct list_head eq_list;	/* used for event queue */
+	spinlock_t eq_lock;	/* for event queue synchronization */
+	struct mutex usr_sync;	/* mainly for dongle up/down synchronization */
+	struct wl_scan_results *bss_list;
+	struct wl_scan_results *scan_results;
+
+	/* scan request object for internal purpose */
+	struct wl_scan_req *scan_req_int;
+
+	/* bss information for cfg80211 layer */
+	struct wl_cfg80211_bss_info *bss_info;
+	/* information element object for internal purpose */
+	struct wl_ie ie;
+	u8 scan_ie_buf[2048];
+	int scan_ie_len;
+	struct ether_addr bssid;	/* bssid of currently engaged network */
+
+	/* for synchronization of main event thread */
+	struct semaphore event_sync;
+	struct wl_profile *profile;	/* holding dongle profile */
+	struct wl_iscan_ctrl *iscan;	/* iscan controller */
+
+	/* association information container */
+	struct wl_connect_info conn_info;
+
+	/* control firmware and nvram parameter downloading */
+	struct wl_fw_ctrl *fw;
+	struct wl_pmk_list *pmk_list;	/* wpa2 pmk list */
+	struct task_struct *event_tsk;	/* task of main event handler thread */
+	unsigned long status;		/* current dongle status */
+	void *pub;
+	u32 channel;		/* current channel */
+	bool iscan_on;		/* iscan on/off switch */
+	bool iscan_kickstart;	/* indicate iscan already started */
+	bool escan_on;      /* escan on/off switch */
+	struct escan_info escan_info;   /* escan information */
+	bool active_scan;	/* current scan mode */
+	bool ibss_starter;	/* indicates this sta is ibss starter */
+	bool link_up;		/* link/connection up flag */
+
+	/* indicates whether the dongle supports power save mode */
+	bool pwr_save;
+	bool dongle_up;		/* indicates whether the dongle is up */
+	bool roam_on;		/* on/off switch for dongle self-roaming */
+	bool scan_tried;	/* indicates if first scan attempted */
+	u8 *ioctl_buf;	/* ioctl buffer */
+	u8 *escan_ioctl_buf;
+	u8 *extra_buf;	/* mainly to grab assoc information */
+	struct dentry *debugfsdir;
+	struct rfkill *rfkill;
+	bool rf_blocked;
+
+	struct ieee80211_channel remain_on_chan;
+	enum nl80211_channel_type remain_on_chan_type;
+	u64 cache_cookie;
+	wait_queue_head_t dongle_event_wait;
+	struct p2p_info p2p;
+	s8 last_eventmask[WL_EVENTING_MASK_LEN];
+	u8 ci[0] __attribute__ ((__aligned__(NETDEV_ALIGN)));
+};
+
+#define wl_to_dev(w) (wiphy_dev(wl->wdev->wiphy))
+#define wl_to_wiphy(w) (w->wdev->wiphy)
+#define wiphy_to_wl(w) ((struct wl_priv *)(wiphy_priv(w)))
+#define wl_to_wdev(w) (w->wdev)
+#define wdev_to_wl(w) ((struct wl_priv *)(wdev_priv(w)))
+#define wl_to_prmry_ndev(w) (w->wdev->netdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define ci_to_wl(c) (ci->wl)
+#define wl_to_ci(w) (&w->ci)
+#define wl_to_sr(w) (w->scan_req_int)
+#define wl_to_ie(w) (&w->ie)
+#define iscan_to_wl(i) ((struct wl_priv *)(i->data))
+#define wl_to_iscan(w) (w->iscan)
+#define wl_to_conn(w) (&w->conn_info)
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+	return bss = bss ?
+		(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+static inline s32 alloc_idx_vwdev(struct wl_priv *wl)
+{
+	s32 i = 0;
+	for (i = 0; i < VWDEV_CNT; i++) {
+		if (wl->vwdev[i] == NULL)
+				return i;
+	}
+	return -1;
+}
+
+static inline s32 get_idx_vwdev_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+	s32 i = 0;
+	for (i = 0; i < VWDEV_CNT; i++) {
+		if ((wl->vwdev[i] != NULL) && (wl->vwdev[i]->netdev == ndev))
+				return i;
+	}
+	return -1;
+}
+
+static inline s32 get_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+	s32 i = 0;
+	for (i = 0; i <= VWDEV_CNT; i++) {
+		if (wl->conf->mode[i].ndev != NULL && (wl->conf->mode[i].ndev == ndev))
+			return wl->conf->mode[i].type;
+	}
+	return -1;
+}
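+
+/* Record or update the operating mode for @ndev in the per-interface table;
+ * passing type == -1 frees the entry for that net_device.
+ */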
+static inline void set_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev, s32 type)
+{
+	s32 i = 0;
+	for (i = 0; i <= VWDEV_CNT; i++) {
+		if (type == -1) {
+			/* free the info of netdev */
+			if (wl->conf->mode[i].ndev == ndev) {
+				wl->conf->mode[i].ndev = NULL;
+				wl->conf->mode[i].type = -1;
+				break;
+			}
+
+		} else {
+			if ((wl->conf->mode[i].ndev != NULL)&&
+			(wl->conf->mode[i].ndev == ndev)) {
+				/* update type of ndev */
+				wl->conf->mode[i].type = type;
+				break;
+			}
+			else if ((wl->conf->mode[i].ndev == NULL)&&
+			(wl->conf->mode[i].type == -1)) {
+				wl->conf->mode[i].ndev = ndev;
+				wl->conf->mode[i].type = type;
+				break;
+			}
+		}
+	}
+}
+#define free_vwdev_by_index(wl, __i) do {      \
+						if (wl->vwdev[__i] != NULL) \
+							kfree(wl->vwdev[__i]); \
+						wl->vwdev[__i] = NULL; \
+					} while (0)
+
+#define for_each_bss(list, bss, __i)	\
+	for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+/* In case of WPS from wpa_supplicant, pairwise suite and group suite are 0.
+ * In addition to that, wpa_version is WPA_VERSION_1
+ */
+#define is_wps_conn(_sme) \
+	((_sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) && \
+	 (!_sme->crypto.n_ciphers_pairwise) && \
+	 (!_sme->crypto.cipher_group))
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *data);
+extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
+extern void wl_cfg80211_detach(void);
+/* event handler from dongle */
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+            void *data);
+extern void wl_cfg80211_set_sdio_func(void *func);	/* set sdio function info */
+extern struct sdio_func *wl_cfg80211_get_sdio_func(void);	/* get sdio function info */
+extern s32 wl_cfg80211_up(void);	/* dongle up */
+extern s32 wl_cfg80211_down(void);	/* dongle down */
+extern s32 wl_cfg80211_notify_ifadd(struct net_device *net);
+extern s32 wl_cfg80211_notify_ifdel(struct net_device *net);
+extern s32 wl_cfg80211_is_progress_ifadd(void);
+extern s32 wl_cfg80211_is_progress_ifchange(void);
+extern s32 wl_cfg80211_notify_ifchange(void);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern void *wl_cfg80211_request_fw(s8 *file_name);
+extern s32 wl_cfg80211_read_fw(s8 *buf, u32 size);
+extern void wl_cfg80211_release_fw(void);
+extern s8 *wl_cfg80211_get_fwname(void);
+extern s8 *wl_cfg80211_get_nvramname(void);
+#ifdef CONFIG_SYSCTL
+extern s32 wl_cfg80211_sysctl_export_devaddr(void *data);
+#endif
+
+/* do scan abort */
+extern s32
+wl_cfg80211_scan_abort(struct wl_priv *wl, struct net_device *ndev);
+
+extern s32
+wl_cfg80211_if_is_group_owner(void);
+#endif				/* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 0000000..0a5a8a8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,1229 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 Exp $
+ * $Id$
+ */
+
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+
+
+static s8 ioctlbuf[WLC_IOCTL_MAXLEN];
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+static s8 *smbuf = ioctlbuf;
+
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static s32
+wl_cfgp2p_vndr_ie(struct net_device *ndev, s32 bssidx, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete);
+/*
+ * Initialize variables related to P2P
+ */
+void
+wl_cfgp2p_init_priv(struct wl_priv *wl)
+{
+	wl->p2p.on = 0;
+	wl->p2p.scan = 0; /* by default , legacy scan */
+	wl->p2p.status = 0;
+	wl->p2p.listen_timer = NULL;
+
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
+
+	INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_CONNECTION);
+#undef INIT_IE
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl);
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0;
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL;
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0;
+
+}
+
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl)
+{
+	s32 ret = BCME_OK;
+
+	/* TODO : Do we have to check whether APSTA is enabled or not ? */
+	return ret;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to create
+ * @if_type  : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec   : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	struct net_device *netdev = wl_to_prmry_ndev(wl);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_INFO(("---wl p2p_ifadd %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+	    ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2],
+		ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5],
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+	        (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	err = wldev_iovar_setbuf(netdev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+		ioctlbuf, sizeof(ioctlbuf));
+	return err;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = wl_to_prmry_ndev(wl);
+
+	CFGP2P_INFO(("---wl p2p_ifdel %02x:%02x:%02x:%02x:%02x:%02x\n",
+	    mac->octet[0], mac->octet[1], mac->octet[2],
+	    mac->octet[3], mac->octet[4], mac->octet[5]));
+
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+		ioctlbuf, sizeof(ioctlbuf));
+
+	if (unlikely(ret < 0)) {
+		printk("'wl p2p_ifdel' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac      : MAC address of the BSS whose role is to be changed
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	struct net_device *netdev = wl_to_prmry_ndev(wl);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_INFO(("---wl p2p_ifchange %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+	    ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2],
+	    ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5],
+	    (if_type == WL_P2P_IF_GO) ? "go" : "client",
+		(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+		ioctlbuf, sizeof(ioctlbuf));
+
+	if (unlikely(err < 0)) {
+		printk("'wl p2p_ifupd' error %d\n", err);
+	}
+	return err;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the created BSS
+ * @index    : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index)
+{
+	s32 ret;
+	u8 getbuf[64];
+	struct net_device *dev = wl_to_prmry_ndev(wl);
+
+	CFGP2P_INFO(("---wl p2p_if %02x:%02x:%02x:%02x:%02x:%02x\n",
+	    mac->octet[0], mac->octet[1], mac->octet[2],
+	    mac->octet[3], mac->octet[4], mac->octet[5]));
+
+	ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac),
+	            getbuf, sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY));
+
+	if (ret == 0) {
+		memcpy(index, getbuf, sizeof(*index));
+		CFGP2P_INFO(("---wl p2p_if   ==> %d\n", *index));
+	}
+
+	return ret;
+}
+
+s32
+wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+	}
+
+	return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode      : one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}
+ * @channel   : the channel to listen on
+ * @listen_ms : the time (in milliseconds) to wait
+ * @bssidx    : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+	wl_p2p_disc_st_t discovery_mode;
+	s32 ret;
+	struct net_device *dev;
+	CFGP2P_DBG(("enter\n"));
+
+	if (unlikely(bssidx >= P2PAPI_BSSCFG_MAX)) {
+		CFGP2P_ERR((" %d index out of range\n", bssidx));
+		return -1;
+	}
+
+	dev = wl_to_p2p_bss_ndev(wl, bssidx);
+	if (unlikely(dev == NULL)) {
+		CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+		return BCME_NOTFOUND;
+	}
+
+	/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+	discovery_mode.state = mode;
+	discovery_mode.chspec = CH20MHZ_CHSPEC(channel);
+	discovery_mode.dwell = listen_ms;
+	ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+	            sizeof(discovery_mode), ioctlbuf, sizeof(ioctlbuf), bssidx);
+
+	return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+s32
+wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index)
+{
+	s32 ret;
+	struct net_device *dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+
+	ret = wldev_iovar_getint(dev, "p2p_dev", index);
+	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+	if (unlikely(ret <  0)) {
+	    CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+		return ret;
+	}
+	return ret;
+}
+
+s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl)
+{
+
+	s32 index = 0;
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) {
+		CFGP2P_ERR(("do nothing, already initialized\n"));
+		return ret;
+	}
+
+	ret = wl_cfgp2p_set_discovery(wl, 1);
+	if (ret < 0) {
+		CFGP2P_ERR(("set discovery error\n"));
+		return ret;
+	}
+	/* Enable P2P Discovery in the WL Driver */
+	ret = wl_cfgp2p_get_disc_idx(wl, &index);
+
+	if (ret < 0) {
+		return ret;
+	}
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) =
+	    wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index;
+
+	/* Set the initial discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+	        wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret != 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+		wl_cfgp2p_set_discovery(wl, 0);
+		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+		wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+		return 0;
+	}
+	return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @wl        : wl_private data
+ * Returns 0 if success
+ */
+s32
+wl_cfgp2p_deinit_discovery(struct wl_priv *wl)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR(("do nothing, not initialized\n"));
+		return -1;
+	}
+	/* Set the discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+	/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+	ret = wl_cfgp2p_set_discovery(wl, 0);
+
+	/* Clear our saved WPS and P2P IEs for the discovery BSS.  The driver
+	 * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
+	 * BSS.
+	 */
+
+	/* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
+	 * have no discovery BSS.
+	 */
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+	return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @wl	: wl_private data
+ * @ie  : probe request ie (WPS IE + P2P IE)
+ * @ie_len   : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len)
+{
+	s32 ret = BCME_OK;
+	if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+		CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+		goto set_ie;
+	}
+
+	wl_set_p2p_status(wl, DISCOVERY_ON);
+
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wl_cfgp2p_init_discovery(wl);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" init discovery error %d\n", ret));
+		goto exit;
+	}
+	/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+	 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+	 * Some peer devices may not initiate WPS with us if this bit is not set.
+	 */
+	ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE),
+			"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" wsec error %d\n", ret));
+	}
+set_ie:
+	ret = wl_cfgp2p_set_managment_ie(wl, dev,
+	            wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE),
+	            VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("set probe request ie error %d\n", ret));
+		goto exit;
+	}
+exit:
+	return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @wl       : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" enter\n"));
+	wl_clr_p2p_status(wl, DISCOVERY_ON);
+
+	if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR((" do nothing, not initialized\n"));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+	}
+	/* Do a scan abort to stop the driver's scan engine in case it is still
+	 * waiting out an action frame tx dwell time.
+	 */
+#ifdef NOT_YET
+	if (wl_get_p2p_status(wl, SCANNING)) {
+		p2pwlu_scan_abort(hdl, FALSE);
+	}
+#endif
+	wl_clr_p2p_status(wl, DISCOVERY_ON);
+	ret = wl_cfgp2p_deinit_discovery(wl);
+
+exit:
+	return ret;
+}
+
+s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active,
+	u32 num_chans, u16 *channels,
+	s32 search_state, u16 action, u32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 memsize;
+	s32 eparams_size;
+	u32 i;
+	s8 *memblk;
+	wl_p2p_scan_t *p2p_params;
+	wl_escan_params_t *eparams;
+	wlc_ssid_t ssid;
+	/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 10
+
+	wl_set_p2p_status(wl, SCANNING);
+	/* Allocate scan params which need space for num_chans channels and 0 SSIDs */
+	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+	    OFFSETOF(wl_escan_params_t, params)) +
+		num_chans * sizeof(eparams->params.channel_list[0]);
+
+	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+	memblk = scanparambuf;
+	if (memsize > sizeof(scanparambuf)) {
+		CFGP2P_ERR((" scanpar buf too small (%u > %u)\n",
+		    memsize, sizeof(scanparambuf)));
+		return -1;
+	}
+	memset(memblk, 0, memsize);
+	memset(ioctlbuf, 0, sizeof(ioctlbuf));
+	if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/*
+		 * In SEARCH state we don't need to set the SSID explicitly
+		 * because the dongle uses the P2P wildcard SSID internally by default.
+		 */
+		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+		ssid.SSID_len = htod32(0);
+
+	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN state: plain 802.11 scan.
+		 * The WFD supplicant issues p2p_find with type=progressive or type=full.
+		 * For type=progressive we must set the SSID to the P2P wildcard SSID,
+		 * otherwise the driver would just do a broadcast scan.
+		 */
+		strcpy(ssid.SSID, WL_P2P_WILDCARD_SSID);
+		ssid.SSID_len = htod32(WL_P2P_WILDCARD_SSID_LEN);
+		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+	}
+
+
+	/* Fill in the P2P scan structure at the start of the iovar param block */
+	p2p_params = (wl_p2p_scan_t*) memblk;
+	p2p_params->type = 'E';
+	/* Fill in the Scan structure that follows the P2P scan structure */
+	eparams = (wl_escan_params_t*) (p2p_params + 1);
+	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+	if (active)
+		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	else
+		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+	memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	if (ssid.SSID_len)
+		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+	eparams->params.nprobes = htod32(P2PAPI_SCAN_NPROBES);
+	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+	eparams->params.active_time = htod32(-1);
+	eparams->params.passive_time = htod32(-1);
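+	/* channel_num packs two counts into one u32: the number of SSIDs in
+	 * the upper bits (via WL_SCAN_PARAMS_NSSID_SHIFT, 0 here) and the
+	 * number of channels in the lower bits (masked by
+	 * WL_SCAN_PARAMS_COUNT_MASK).
+	 */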
+	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	    (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	for (i = 0; i < num_chans; i++) {
+		eparams->params.channel_list[i] = htodchanspec(channels[i]);
+	}
+	eparams->version = htod32(ESCAN_REQ_VERSION);
+	eparams->action =  htod16(action);
+	eparams->sync_id = htod16(0x1234);
+	CFGP2P_INFO(("SCAN CHANNELS : "));
+
+	for (i = 0; i < num_chans; i++) {
+		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+		else CFGP2P_INFO((",%d", channels[i]));
+	}
+
+	CFGP2P_INFO(("\n"));
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_scan",
+	            memblk, memsize, ioctlbuf, sizeof(ioctlbuf), bssidx);
+	return ret;
+}
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
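+/* Illustrative usage (mirrors wl_cfgp2p_find_wpsie() below): the helpers are
+ * called in a bcm_parse_tlvs() loop; on a mismatch wl_cfgp2p_has_ie()
+ * advances the tlvs pointer/length past the current IE so the search can
+ * continue, e.g.
+ *
+ *	while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ *		if (wl_cfgp2p_is_wps_ie((u8 *)ie, &parse, &len))
+ *			return (wpa_ie_fixed_t *)ie;
+ *	}
+ */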
+/* Delete and set a management IE in the firmware
+ * Parameters:
+ * @wl       : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * @pktflag  : packet flag for IE (VNDR_IE_PRBREQ_FLAG, VNDR_IE_PRBRSP_FLAG,
+ *             VNDR_IE_ASSOCREQ_FLAG, VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_BEACON_FLAG)
+ * @ie       : probe request ie (WPS IE + P2P IE)
+ * @ie_len   : probe request ie length
+ * Returns 0 if success.
+ */
+
+s32
+wl_cfgp2p_set_managment_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+    s32 pktflag, const u8 *p2p_ie, u32 p2p_ie_len)
+{
+	/* Vendor-specific Information Element ID */
+#define VNDR_SPEC_ELEMENT_ID 0xdd
+	s32 ret = BCME_OK;
+	u32 pos;
+	u8  *ie_buf;
+	u8  *mgmt_ie_buf;
+	u32 mgmt_ie_buf_len;
+	u32 *mgmt_ie_len;
+	u8 ie_id, ie_len;
+	u8 delete = 0;
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len)
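+/* For example, IE_TYPE(probe_req, bssidx) expands to
+ * wl_to_p2p_bss_saved_ie(wl, bssidx).p2p_probe_req_ie, i.e. the saved probe
+ * request IE buffer for that BSS, and IE_TYPE_LEN() to its stored length.
+ */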
+	if (bssidx == -1)
+		return BCME_BADARG;
+	if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+		bssidx =  wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+	switch (pktflag) {
+		case VNDR_IE_PRBREQ_FLAG :
+			mgmt_ie_buf = IE_TYPE(probe_req, bssidx);
+			mgmt_ie_len = &IE_TYPE_LEN(probe_req, bssidx);
+			mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, bssidx));
+			break;
+		case VNDR_IE_PRBRSP_FLAG :
+			mgmt_ie_buf = IE_TYPE(probe_res, bssidx);
+			mgmt_ie_len = &IE_TYPE_LEN(probe_res, bssidx);
+			mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, bssidx));
+			break;
+		case VNDR_IE_ASSOCREQ_FLAG :
+			mgmt_ie_buf = IE_TYPE(assoc_req, bssidx);
+			mgmt_ie_len = &IE_TYPE_LEN(assoc_req, bssidx);
+			mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, bssidx));
+			break;
+		case VNDR_IE_ASSOCRSP_FLAG :
+			mgmt_ie_buf = IE_TYPE(assoc_res, bssidx);
+			mgmt_ie_len = &IE_TYPE_LEN(assoc_res, bssidx);
+			mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, bssidx));
+			break;
+		case VNDR_IE_BEACON_FLAG :
+			mgmt_ie_buf = IE_TYPE(beacon, bssidx);
+			mgmt_ie_len = &IE_TYPE_LEN(beacon, bssidx);
+			mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, bssidx));
+			break;
+		default:
+			mgmt_ie_buf = NULL;
+			mgmt_ie_len = NULL;
+			CFGP2P_ERR(("unsupported pktflag type\n"));
+			return -1;
+	}
+	/* Add if there is any extra IE */
+	if (p2p_ie && p2p_ie_len) {
+		CFGP2P_INFO(("Request has extra IE\n"));
+		if (p2p_ie_len > mgmt_ie_buf_len) {
+			CFGP2P_ERR(("extra IE size too big\n"));
+			ret = -ENOMEM;
+		} else {
+			if (mgmt_ie_buf != NULL) {
+				if ((p2p_ie_len == *mgmt_ie_len) &&
+				     (memcmp(mgmt_ie_buf, p2p_ie, p2p_ie_len) == 0)) {
+					CFGP2P_INFO(("Previous mgmt IE is equal to current IE\n"));
+					goto exit;
+				}
+				pos = 0;
+				delete = 1;
+				ie_buf = (u8 *) mgmt_ie_buf;
+				while (pos < *mgmt_ie_len) {
+					ie_id = ie_buf[pos++];
+					ie_len = ie_buf[pos++];
+					CFGP2P_INFO(("DELETED ID(%d), Len(%d), "
+						"OUI(%02x:%02x:%02x)\n",
+						ie_id, ie_len, ie_buf[pos],
+						ie_buf[pos+1], ie_buf[pos+2]));
+					ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag,
+					    ie_buf+pos, VNDR_SPEC_ELEMENT_ID,
+						ie_buf+pos+3, ie_len-3, delete);
+					pos += ie_len;
+				}
+
+			}
+			/* save the current IE in wl struct */
+			memcpy(mgmt_ie_buf, p2p_ie, p2p_ie_len);
+			*mgmt_ie_len = p2p_ie_len;
+			pos = 0;
+			ie_buf = (u8 *) p2p_ie;
+			delete = 0;
+			while (pos < p2p_ie_len) {
+				ie_id = ie_buf[pos++];
+				ie_len = ie_buf[pos++];
+				if ((ie_id == DOT11_MNG_VS_ID) &&
+				   (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) ||
+					wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) {
+					CFGP2P_INFO(("ADDED ID : %d, Len : %d , OUI :"
+						"%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos],
+						ie_buf[pos+1], ie_buf[pos+2]));
+					ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag, ie_buf+pos,
+					    VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, ie_len-3, delete);
+				}
+				pos += ie_len;
+			}
+		}
+
+	}
+#undef IE_TYPE
+#undef IE_TYPE_LEN
+exit:
+	return ret;
+}
+
+/* Clear the management IE buffers of a BSSCFG
+ * Parameters:
+ * @wl       : wl_private data
+ * @bssidx   : bssidx for BSS
+ *
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx)
+{
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0)
+	if (bssidx < 0) {
+		CFGP2P_ERR(("invalid bssidx\n"));
+		return BCME_BADARG;
+	}
+	INIT_IE(probe_req, bssidx);
+	INIT_IE(probe_res, bssidx);
+	INIT_IE(assoc_req, bssidx);
+	INIT_IE(assoc_res, bssidx);
+	INIT_IE(beacon, bssidx);
+
+
+	return BCME_OK;
+}
+
+
+/* Check whether the IE pointed to by 'ie' is the expected vendor-specific
+ * entry (matching OUI and type).  If not, advance the tlvs buffer
+ * pointer/length past it so the caller can keep searching.
+ */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+	        !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+	        type == ie[TLV_BODY_OFF + oui_len]) {
+		return TRUE;
+	}
+
+	if (tlvs == NULL)
+		return FALSE;
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return FALSE;
+}
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_p2p_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+static s32
+wl_cfgp2p_vndr_ie(struct net_device *ndev, s32 bssidx, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete)
+{
+	s32 err = BCME_OK;
+	s32 buf_len;
+	s32 iecount;
+
+	vndr_ie_setbuf_t *ie_setbuf;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+		CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	buf_len = sizeof(vndr_ie_setbuf_t) + data_len - 1;
+	ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+	CFGP2P_INFO((" ie_id : %02x, data length : %d\n", ie_id, data_len));
+	if (!ie_setbuf) {
+		CFGP2P_ERR(("Error allocating buffer for IE\n"));
+		return -ENOMEM;
+	}
+	if (delete)
+		strcpy(ie_setbuf->cmd, "del");
+	else
+		strcpy(ie_setbuf->cmd, "add");
+	/* Buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int));
+	pktflag = htod32(pktflag);
+	memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag,
+	    &pktflag, sizeof(uint32));
+	ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+	ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len
+	        = (uchar)(data_len + VNDR_IE_MIN_LEN);
+	memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, oui, 3);
+	memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, data, data_len);
+	err = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", ie_setbuf, buf_len,
+		ioctlbuf, sizeof(ioctlbuf), bssidx);
+
+	CFGP2P_INFO(("vndr_ie iovar returns %d\n", err));
+	kfree(ie_setbuf);
+	return err;
+}
+
+/*
+ * Search the bssidx based on dev argument
+ * Parameters:
+ * @wl       : wl_private data
+ * @ndev     : net device to search bssidx
+ * Returns the bssidx for ndev, or -1 if not found
+ */
+s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev)
+{
+	u32 i;
+	s32 index = -1;
+
+	if (ndev == NULL) {
+		CFGP2P_ERR((" ndev is NULL\n"));
+		goto exit;
+	}
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+	    if (ndev == wl_to_p2p_bss_ndev(wl, i)) {
+			index = wl_to_p2p_bss_bssidx(wl, i);
+			break;
+		}
+	}
+exit:
+	return index;
+}
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+            const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG((" Enter\n"));
+	/* TODO : have to acquire bottom half lock ? */
+	if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) {
+		wl_set_p2p_status(wl, LISTEN_EXPIRED);
+
+		if (wl->p2p.listen_timer)
+			del_timer_sync(wl->p2p.listen_timer);
+
+		cfg80211_remain_on_channel_expired(ndev, wl->cache_cookie, &wl->remain_on_chan,
+		    wl->remain_on_chan_type, GFP_KERNEL);
+	}
+
+	return ret;
+
+}
+
+/*
+ *  Timer expiry callback function for LISTEN.
+ *  We can't report cfg80211_remain_on_channel_expired() from timer (softirq)
+ *  context, so let's do it from the event thread context.
+ */
+static void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct wl_priv *wl = (struct wl_priv *) data;
+
+	CFGP2P_DBG((" Enter\n"));
+	msg.event_type =  hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+}
+
+/* 
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters   :
+ * @wl          : wl_private data
+ * @channel     : channel to listen on
+ * @duration_ms : the time (in milliseconds) to listen
+ */
+s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms)
+{
+#define INIT_TIMER(timer, func, duration, extra_delay)	\
+	do {                   \
+		init_timer(timer); \
+		timer->function = func; \
+		timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+		timer->data = (unsigned long) wl; \
+		add_timer(timer); \
+	} while (0)
+
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" Enter\n"));
+	CFGP2P_INFO(("Channel : %d, Duration : %d\n", channel, duration_ms));
+	if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) {
+		CFGP2P_ERR((" Discovery is not enabled, so we have nothing to do\n"));
+		ret = BCME_NOTREADY;
+		goto exit;
+	}
+
+	wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+
+	wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+	            wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+	if (wl->p2p.listen_timer)
+		del_timer_sync(wl->p2p.listen_timer);
+
+	wl->p2p.listen_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
+
+	if (wl->p2p.listen_timer == NULL) {
+		CFGP2P_ERR(("listen_timer allocation failed\n"));
+		return -ENOMEM;
+	}
+
+	/*  We wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from the dongle;
+	 *  if it does not arrive, the timer below fires after duration_ms + 20 ms.
+	 */
+	INIT_TIMER(wl->p2p.listen_timer, wl_cfgp2p_listen_expired, duration_ms, 20);
+
+#undef INIT_TIMER
+exit:
+	return ret;
+}
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" Enter\n"));
+	if (!wl_get_p2p_status(wl, DISCOVERY_ON)) {
+		CFGP2P_ERR((" do nothing, discovery is off\n"));
+		return ret;
+	}
+	if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) {
+		CFGP2P_ERR(("already : %d\n", enable));
+		return ret;
+	}
+
+	wl_chg_p2p_status(wl, SEARCH_ENABLED);
+	/* When disabling Search, reset the WL driver's p2p discovery state to
+	 * WL_P2P_DISC_ST_SCAN.
+	 */
+	if (!enable) {
+		wl_clr_p2p_status(wl, SCANNING);
+		(void) wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+		            wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+	}
+
+	return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+            const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	u32 event_type = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+	CFGP2P_DBG((" Enter\n"));
+	if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+		CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+		if (status == WLC_E_STATUS_SUCCESS)
+			wl_set_p2p_status(wl, ACTION_TX_COMPLETED);
+		else
+			CFGP2P_ERR(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+		wake_up_interruptible(&wl->dongle_event_wait);
+	} else {
+		CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received, "
+					"status : %d\n", status));
+
+	}
+
+	return ret;
+}
+/* Send an action frame immediately without doing channel synchronization.
+ *
+ * This function does not wait for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an
+ * 802.11 ack has been received for the sent action frame.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 timeout = 0;
+
+
+	CFGP2P_INFO(("\n"));
+	CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+	    af_params->channel, af_params->dwell_time));
+
+	wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
+#define MAX_WAIT_TIME 2000
+	if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+		bssidx =  wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, "actframe",
+	           af_params, sizeof(*af_params), ioctlbuf, sizeof(ioctlbuf), bssidx);
+
+	if (ret < 0) {
+		CFGP2P_ERR((" failed to send action frame\n"));
+		goto exit;
+	}
+	timeout = wait_event_interruptible_timeout(wl->dongle_event_wait,
+	                (wl_get_p2p_status(wl, ACTION_TX_COMPLETED) == TRUE),
+	                    msecs_to_jiffies(MAX_WAIT_TIME));
+
+	if (timeout > 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
+		CFGP2P_INFO(("tx action frame operation completed\n"));
+		ret = BCME_OK;
+	} else {
+		ret = BCME_ERROR;
+		CFGP2P_INFO(("tx action frame operation failed\n"));
+	}
+exit:
+	CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+#undef MAX_WAIT_TIME
+	return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
+            struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+{
+	memset(out_dev_addr, 0, sizeof(*out_dev_addr));
+	memset(out_int_addr, 0, sizeof(*out_int_addr));
+
+	/* Generate the P2P Device Address.  This consists of the device's
+	 * primary MAC address with the locally administered bit set.
+	 */
+	memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
+	out_dev_addr->octet[0] |= 0x02;
+
+	/* Generate the P2P Interface Address.  If the discovery and connection
+	 * BSSCFGs need to simultaneously co-exist, then this address must be
+	 * different from the P2P Device Address.
+	 */
+	memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+	out_int_addr->octet[4] ^= 0x80;
+
+}
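+/* Worked example with a hypothetical primary MAC (for illustration only):
+ *   primary_addr = 00:90:4c:11:22:33
+ *   out_dev_addr = 02:90:4c:11:22:33  (octet[0] |= 0x02, locally administered)
+ *   out_int_addr = 02:90:4c:11:a2:33  (octet[4] ^= 0x80)
+ */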
+
+/* P2P IF Address change to Virtual Interface MAC Address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+	u16 len = ie->len;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+	CFGP2P_DBG((" Enter\n"));
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
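+	/* Each subelement (P2P attribute) is assumed to be laid out as:
+	 *   [0]     attribute id   (1 byte)
+	 *   [1..2]  body length    (2 bytes, little endian)
+	 *   [3..]   body           (e.g. a 6-byte MAC for the address attributes)
+	 * which is what the loop below walks.
+	 */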
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			if (subelt_id == P2P_SEID_INTINTADDR) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_INFO) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_GROUP_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+			}
+			return;
+		} else {
+			CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+		}
+		subel += subelt_len;
+	}
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup().  DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+	s32 result, val;
+	bool isup = false;
+	s8 getbuf[64];
+
+	/* Check if the BSS is up */
+	*(int*)getbuf = -1;
+	result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+	                    sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0);
+	if (result != 0) {
+		CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, result));
+		CFGP2P_ERR(("NOTE: this ioctl error is normal "
+					"when the BSS has not been created yet.\n"));
+	} else {
+		val = *(int*)getbuf;
+		val = dtoh32(val);
+		CFGP2P_INFO(("---wl bss -C %d   ==> %d\n", bsscfg_idx, val));
+		isup = (val ? TRUE : FALSE);
+	}
+	return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct net_device *ndev, s32 bsscfg_idx, s32 up)
+{
+	s32 ret = BCME_OK;
+	s32 val = up ? 1 : 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+	CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		ioctlbuf, sizeof(ioctlbuf));
+
+	if (ret != 0) {
+		CFGP2P_ERR(("'bss -C %d %s' failed with %d\n",
+			bsscfg_idx, up ? "up" : "down", ret));
+	}
+
+	return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev)
+{
+	s32 ret = BCME_OK;
+	s32 p2p_supported = 0;
+	ret = wldev_iovar_getint(ndev, "p2p",
+	               &p2p_supported);
+	if (ret < 0) {
+	    CFGP2P_ERR(("wl p2p error %d\n", ret));
+		return 0;
+	}
+	if (p2p_supported)
+		CFGP2P_INFO(("p2p is supported\n"));
+
+	return p2p_supported;
+}
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct wl_priv *wl)
+{
+	if (wl->p2p.listen_timer)
+		del_timer_sync(wl->p2p.listen_timer);
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
new file mode 100644
index 0000000..27aef59
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -0,0 +1,224 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.h,v 1.1.4.1.2.8 2011/02/09 01:37:52 Exp $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <proto/802.11.h>
+#include <proto/p2p.h>
+
+struct wl_priv;
+extern u32 wl_dbg_level;
+
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library.  Do not
+ * confuse this with a bsscfg index.  This value is an index into the
+ * bss_idx[] array of p2p_bss structures, each of which in turn contains
+ * the actual bsscfg index field.
+ */
+typedef enum {
+	P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+	P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+	P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+	P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
+
+#define IE_MAX_LEN 300
+/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
+struct p2p_saved_ie {
+	u8   p2p_probe_req_ie[IE_MAX_LEN];
+	u8   p2p_probe_res_ie[IE_MAX_LEN];
+	u8   p2p_assoc_req_ie[IE_MAX_LEN];
+	u8   p2p_assoc_res_ie[IE_MAX_LEN];
+	u8   p2p_beacon_ie[IE_MAX_LEN];
+	u32 p2p_probe_req_ie_len;
+	u32 p2p_probe_res_ie_len;
+	u32 p2p_assoc_req_ie_len;
+	u32 p2p_assoc_res_ie_len;
+	u32 p2p_beacon_ie_len;
+};
+
+struct p2p_bss {
+	u32 bssidx;
+	struct net_device *dev;
+	struct p2p_saved_ie saved_ie;
+};
+
+struct p2p_info {
+	bool on;    /* p2p on/off switch */
+	bool scan;
+	bool vif_created;
+	s8 vir_ifname[IFNAMSIZ];
+	unsigned long status;
+	struct ether_addr dev_addr;
+	struct ether_addr int_addr;
+	struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+	struct timer_list *listen_timer;
+	wlc_ssid_t ssid;
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+	WLP2P_STATUS_DISCOVERY_ON = 0,
+	WLP2P_STATUS_SEARCH_ENABLED,
+	WLP2P_STATUS_IF_ADD,
+	WLP2P_STATUS_IF_DEL,
+	WLP2P_STATUS_IF_DELETING,
+	WLP2P_STATUS_IF_CHANGING,
+	WLP2P_STATUS_IF_CHANGED,
+	WLP2P_STATUS_LISTEN_EXPIRED,
+	WLP2P_STATUS_ACTION_TX_COMPLETED,
+	WLP2P_STATUS_SCANNING
+};
+
+
+#define wl_to_p2p_bss_ndev(wl, type) 	((wl)->p2p.bss_idx[type].dev)
+#define wl_to_p2p_bss_bssidx(wl, type) 	((wl)->p2p.bss_idx[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(wl, type) 	((wl)->p2p.bss_idx[type].saved_ie)
+#define wl_to_p2p_bss(wl, type) ((wl)->p2p.bss_idx[type])
+#define wl_get_p2p_status(wl, stat)   (test_bit(WLP2P_STATUS_ ## stat, &(wl)->p2p.status))
+#define wl_set_p2p_status(wl, stat)   (set_bit(WLP2P_STATUS_ ## stat, &(wl)->p2p.status))
+#define wl_clr_p2p_status(wl, stat)   (clear_bit(WLP2P_STATUS_ ## stat, &(wl)->p2p.status))
+#define wl_chg_p2p_status(wl, stat)   (change_bit(WLP2P_STATUS_ ## stat, &(wl)->p2p.status))
+#define p2p_on(wl) ((wl)->p2p.on)
+#define p2p_scan(wl) ((wl)->p2p.scan)
+
+
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
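+/* Typical (illustrative) usage of the two macros above:
+ *   CFGP2P_INFO(("peer " MACSTR "\n", MAC2STR(mac->octet)));
+ */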
+
+#define CFGP2P_ERR(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_ERR "CFGP2P-ERROR) %s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_INFO(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_ERR "CFGP2P-INFO) %s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_DBG(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_DBG) {			\
+			printk(KERN_ERR "CFGP2P-DEBUG) %s :", __func__);	\
+			printk args;							\
+		}									\
+	} while (0)
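+/* Note the double parentheses at the call sites, e.g. (illustrative):
+ *   CFGP2P_DBG(("bssidx %d\n", bssidx));
+ * the outer pair is consumed by the macro and the inner pair becomes the
+ * argument list passed to printk.
+ */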
+
+
+extern void
+wl_cfgp2p_init_priv(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode,
+            u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+
+extern s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active, u32 num_chans,
+	u16 *channels,
+	s32 search_state, u16 action, u32 bssidx);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern s32
+wl_cfgp2p_set_managment_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+            s32 pktflag, const u8 *p2p_ie, u32 p2p_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx);
+
+extern s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+            const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+            const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
+            struct ether_addr *out_int_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+extern bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+extern s32
+wl_cfgp2p_bss(struct net_device *ndev, s32 bsscfg_idx, s32 up);
+
+
+extern s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct wl_priv *wl);
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define IS_P2P_SSID(ssid) (memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) == 0)
+#endif				/* _wl_cfgp2p_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h
new file mode 100644
index 0000000..0b99557
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -0,0 +1,49 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_dbg.h,v 1.115.6.3 2010-12-15 21:42:23 Exp $
+ */
+
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_PRINT(args)      printf args
+
+
+
+#define WL_NONE(args)
+
+#define WL_ERROR(args)
+#define WL_TRACE(args)
+
+
+#endif /* _wl_dbg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c
new file mode 100644
index 0000000..1f811a9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -0,0 +1,8607 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c,v 1.132.2.18 2011-02-05 01:44:47 Exp $
+ */
+
+#include <wlioctl.h>
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+
+typedef void wlc_info_t;
+typedef void wl_info_t;
+typedef const struct si_pub  si_t;
+#include <wlioctl.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+#define WL_ASSOC(x)
+#define WL_INFORM(x)
+#define WL_WSEC(x)
+#define WL_SCAN(x)
+
+
+#ifdef PNO_SET_DEBUG
+#define WL_PNO(x)	printf x
+#else
+#define WL_PNO(x)
+#endif
+
+
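+/* Convert jiffies to milliseconds, keeping the whole-second part separate so
+ * the multiplication does not overflow (used by the trace macros below).
+ */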
+#define JF2MS ((((jiffies / HZ) * 1000) + ((jiffies % HZ) * 1000) / HZ))
+
+#ifdef COEX_DBG       
+#define WL_TRACE_COEX(x) do { printf("TS:%lu ", JF2MS); \
+							printf x; } while (0)
+#else
+#define WL_TRACE_COEX(x)
+#endif
+
+#ifdef SCAN_DBG        
+#define WL_TRACE_SCAN(x) do { printf("TS:%lu ", JF2MS); \
+							printf x; } while (0)
+#else
+#define WL_TRACE_SCAN(x)
+#endif
+
+
+#include <wl_iw.h>
+
+
+
+
+#define IW_WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+
+#include <linux/rtnetlink.h>
+
+#define WL_IW_USE_ISCAN  1
+#define ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS  1
+
+#ifdef OEM_CHROMIUMOS
+bool g_set_essid_before_scan = TRUE;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	struct mutex  g_wl_ss_scan_lock; 
+#endif 
+
+#if defined(SOFTAP)
+#define WL_SOFTAP(x)
+static struct net_device *priv_dev;
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl;
+static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap);
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac);
+#endif 
+
+
+#define WL_IW_IOCTL_CALL(func_call) \
+	do {				\
+		func_call;		\
+	} while (0)
+
+#define RETURN_IF_EXTRA_NULL(extra) \
+	if (!extra) { \
+		WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \
+		return -EINVAL; \
+	}
+
+static int		g_onoff = G_WLAN_SET_ON;
+wl_iw_extra_params_t	g_wl_iw_params;
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+
+static struct mutex	wl_cache_lock;
+static struct mutex	wl_softap_lock;
+
+#define DHD_OS_MUTEX_INIT(a) mutex_init(a)
+#define DHD_OS_MUTEX_LOCK(a) mutex_lock(a)
+#define DHD_OS_MUTEX_UNLOCK(a) mutex_unlock(a)
+
+#else
+
+#define DHD_OS_MUTEX_INIT(a)
+#define DHD_OS_MUTEX_LOCK(a)
+#define DHD_OS_MUTEX_UNLOCK(a)
+
+#endif 
+
+#include <bcmsdbus.h>
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+extern void dhd_dev_init_ioctl(struct net_device *dev);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#ifdef CONFIG_WIRELESS_EXT
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#endif 
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)
+#endif 
+
+static void *g_scan = NULL;
+static volatile uint g_scan_specified_ssid;	
+static wlc_ssid_t g_specific_ssid;		
+
+static wlc_ssid_t g_ssid;
+
+bool btcoex_is_sco_active(struct net_device *dev);  
+static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl;	
+#if defined(CONFIG_FIRST_SCAN)
+static volatile uint g_first_broadcast_scan;	
+static volatile uint g_first_counter_scans;
+#define MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN 3
+#endif 
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else 
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0);
+#endif 
+
+#if defined(WL_IW_USE_ISCAN)
+#if  !defined(CSCAN)
+static void wl_iw_free_ss_cache(void);
+static int   wl_iw_run_ss_cache_timer(int kick_off);
+#endif 
+#if defined(CONFIG_FIRST_SCAN)
+int  wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+#endif 
+static int dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len);
+#define ISCAN_STATE_IDLE   0
+#define ISCAN_STATE_SCANING 1
+
+
+#define WLC_IW_ISCAN_MAXLEN   2048
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	int    iscan_state;
+	iscan_buf_t * list_hdr;
+	iscan_buf_t * list_cur;
+
+	
+	tsk_ctl_t tsk_ctl;
+
+	uint32 scan_flag;	
+#if defined CSCAN
+	char ioctlbuf[WLC_IOCTL_MEDLEN];
+#else
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+#endif 
+	
+	wl_iscan_params_t *iscan_ex_params_p;
+	int iscan_ex_param_size;
+} iscan_info_t;
+
+
+
+#define  COEX_DHCP 1	
+#ifdef COEX_DHCP
+
+#define BT_DHCP_eSCO_FIX 
+#define BT_DHCP_USE_FLAGS  
+#define BT_DHCP_OPPORTUNITY_WINDOW_TIME	 2500 
+#define BT_DHCP_FLAG_FORCE_TIME 5500 
+
+
+
+static int wl_iw_set_btcoex_dhcp(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+);
+
+static void wl_iw_bt_flag_set(struct net_device *dev, bool set);
+static void wl_iw_bt_release(void);
+
+typedef enum bt_coex_status {
+	BT_DHCP_IDLE = 0,
+	BT_DHCP_START,
+	BT_DHCP_OPPORTUNITY_WINDOW,
+	BT_DHCP_FLAG_FORCE_TIMEOUT
+} coex_status_t;
+
+
+typedef struct bt_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	uint32 ts_dhcp_start; 
+	uint32 ts_dhcp_ok;    
+	bool	dhcp_done; 
+	int	bt_state;
+
+	
+	tsk_ctl_t tsk_ctl;
+
+} bt_info_t;
+
+bt_info_t *g_bt = NULL;
+static void wl_iw_bt_timerfunc(ulong data);
+#endif 
+iscan_info_t *g_iscan = NULL;
+void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+#endif 
+
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+);
+
+#ifndef CSCAN
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+);
+
+static uint
+wl_iw_get_scan_prep(
+	wl_scan_results_t *list,
+	struct iw_request_info *info,
+	char *extra,
+	short max_size
+);
+#endif 
+
+static void
+swap_key_from_BE(
+	wl_wsec_key_t *key
+)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void
+swap_key_to_BE(
+	wl_wsec_key_t *key
+)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+static int
+dev_wlc_ioctl(
+	struct net_device *dev,
+	int cmd,
+	void *arg,
+	int len
+)
+{
+	struct ifreq ifr;
+	wl_ioctl_t ioc;
+	mm_segment_t fs;
+	int ret = -EINVAL;
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return ret;
+	}
+
+	net_os_wake_lock(dev);
+
+	WL_INFORM(("%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d ,\n",
+		__FUNCTION__, current->pid, cmd, arg, len));
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		memset(&ioc, 0, sizeof(ioc));
+		ioc.cmd = cmd;
+		ioc.buf = arg;
+		ioc.len = len;
+
+		strcpy(ifr.ifr_name, dev->name);
+		ifr.ifr_data = (caddr_t) &ioc;
+
+		
+		ret = dev_open(dev);
+		if (ret) {
+			WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret));
+			net_os_wake_unlock(dev);
+			return ret;
+		}
+
+		fs = get_fs();
+		set_fs(get_ds());
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
+		ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+		ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif 
+		set_fs(fs);
+	}
+	else {
+		WL_TRACE(("%s: call after driver stop : ignored\n", __FUNCTION__));
+	}
+
+	net_os_wake_unlock(dev);
+
+	return ret;
+}
+
+
+static int
+dev_wlc_intvar_get_reg(
+	struct net_device *dev,
+	char *name,
+	uint  reg,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	len = bcm_mkiovar(name, (char *)(&reg), sizeof(reg), (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+	return (error);
+}
+
+
+static int
+dev_wlc_intvar_set_reg(
+	struct net_device *dev,
+	char *name,
+	char *addr,
+	char * val)
+{
+	char reg_addr[8];
+
+	memset(reg_addr, 0, sizeof(reg_addr));
+	memcpy((char *)&reg_addr[0], (char *)addr, 4);
+	memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+	return (dev_wlc_bufvar_set(dev, name,  (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
+
+
+
+static int
+dev_wlc_intvar_set(
+	struct net_device *dev,
+	char *name,
+	int val)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	uint len;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	ASSERT(len);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+#if defined(WL_IW_USE_ISCAN)
+static int
+dev_iw_iovar_setbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+
+	if (iolen == 0)
+		return 0;
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+#endif 
+
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+	struct net_device *dev,
+	char *name,
+	char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#else
+	static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#endif 
+	uint buflen;
+
+	buflen = bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf));
+	ASSERT(buflen);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen));
+}
+#endif 
+
+
+static int
+dev_wlc_bufvar_get(
+	struct net_device *dev,
+	char *name,
+	char *buf, int buflen)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#else
+	static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
+#endif 
+	int error;
+	uint len;
+
+	len = bcm_mkiovar(name, NULL, 0, ioctlbuf, sizeof(ioctlbuf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	if (!error)
+		bcopy(ioctlbuf, buf, buflen);
+
+	return (error);
+}
+
+
+
+static int
+dev_wlc_intvar_get(
+	struct net_device *dev,
+	char *name,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	uint data_null;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+
+	return (error);
+}
+
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_active_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int as = 0;
+	int error = 0;
+	char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+	if (g_iscan->iscan_state == ISCAN_STATE_IDLE)
+#endif 
+		error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+#if defined(WL_IW_USE_ISCAN)
+	else
+		g_iscan->scan_flag = as;
+#endif 
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+static int
+wl_iw_set_passive_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int ps = 1;
+	int error = 0;
+	char *p = extra;
+
+#if defined(WL_IW_USE_ISCAN)
+	if (g_iscan->iscan_state == ISCAN_STATE_IDLE) {
+#endif 
+
+		 
+		if (g_scan_specified_ssid == 0) {
+			error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &ps, sizeof(ps));
+		}
+#if defined(WL_IW_USE_ISCAN)
+	}
+	else
+		g_iscan->scan_flag = ps;
+#endif 
+
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+
+static int
+wl_iw_set_txpower(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+	int txpower = -1;
+
+	txpower = bcm_atoi(extra + strlen(TXPOWER_SET_CMD) + 1);
+	if ((txpower >= 0) && (txpower <= 127))
+	{
+		txpower |= WL_TXPWR_OVERRIDE;
+		txpower = htod32(txpower);
+
+		error = dev_wlc_intvar_set(dev, "qtxpower", txpower);
+		p += snprintf(p, MAX_WX_STRING, "OK");
+		WL_TRACE(("%s: set TXpower 0x%X is OK\n", __FUNCTION__, txpower));
+	} else {
+		WL_ERROR(("%s: set tx power failed\n", __FUNCTION__));
+		p += snprintf(p, MAX_WX_STRING, "FAIL");
+	}
+
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+static int
+wl_iw_get_macaddr(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error;
+	char buf[128];
+	struct ether_addr *id;
+	char *p = extra;
+
+	
+	strcpy(buf, "cur_etheraddr");
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, buf, sizeof(buf));
+	id = (struct ether_addr *) buf;
+	p += snprintf(p, MAX_WX_STRING, "Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+		id->octet[0], id->octet[1], id->octet[2],
+		id->octet[3], id->octet[4], id->octet[5]);
+	wrqu->data.length = p - extra + 1;
+
+	return error;
+}
+
+
+
+static int
+wl_iw_set_country(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	char country_code[WLC_CNTRY_BUF_SZ];
+	int error = 0;
+	char *p = extra;
+	int country_offset;
+	int country_code_size;
+	wl_country_t cspec = {{0}, 0, {0}};
+	char smbuf[WLC_IOCTL_SMLEN];
+	scb_val_t scbval;
+
+	cspec.rev = -1;
+	memset(country_code, 0, sizeof(country_code));
+	memset(smbuf, 0, sizeof(smbuf));
+
+	
+	country_offset = strcspn(extra, " ");
+	country_code_size = strlen(extra) - country_offset;
+
+	
+	if (country_offset != 0) {
+		strncpy(country_code, extra + country_offset +1,
+			MIN(country_code_size, sizeof(country_code)));
+
+		
+		bzero(&scbval, sizeof(scb_val_t));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+			WL_ERROR(("%s: set country failed due to Disassoc error\n", __FUNCTION__));
+			goto exit_failed;
+		}
+
+		memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+		memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+
+		get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+
+		
+		if ((error = dev_iw_iovar_setbuf(dev, "country", &cspec,
+			sizeof(cspec), smbuf, sizeof(smbuf))) >= 0) {
+			p += snprintf(p, MAX_WX_STRING, "OK");
+			WL_ERROR(("%s: set country for %s as %s rev %d is OK\n",
+				__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+			dhd_bus_country_set(dev, &cspec);
+			goto exit;
+		}
+	}
+
+	WL_ERROR(("%s: set country for %s as %s rev %d failed\n",
+		__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+
+exit_failed:
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
+static int
+wl_iw_set_power_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+	static int  pm = PM_FAST;
+	int  pm_local = PM_OFF;
+	char powermode_val = 0;
+
+	WL_TRACE_COEX(("%s: DHCP session cmd:%s\n", __FUNCTION__, extra));
+
+	strncpy((char *)&powermode_val, extra + strlen("POWERMODE") +1, 1);
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+		WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+		dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm));
+		dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local));
+
+		
+		net_os_set_packet_filter(dev, 0);
+
+#ifdef COEX_DHCP
+		g_bt->ts_dhcp_start = JF2MS;
+		g_bt->dhcp_done = false;
+		WL_TRACE_COEX(("%s: DHCP start, pm:%d changed to pm:%d\n",
+			__FUNCTION__, pm, pm_local));
+
+#endif 
+	} else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) {
+		
+
+		dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+
+		
+		net_os_set_packet_filter(dev, 1);
+
+#ifdef COEX_DHCP
+		g_bt->dhcp_done = true;
+		g_bt->ts_dhcp_ok = JF2MS;
+		WL_TRACE_COEX(("%s: DHCP done for:%d ms, restored pm:%d\n",
+			__FUNCTION__, (g_bt->ts_dhcp_ok - g_bt->ts_dhcp_start), pm));
+#endif 
+
+	} else {
+		WL_ERROR(("%s Unknown power setting, ignored\n",
+			__FUNCTION__));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+
+	return error;
+}
+
+
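+/* Sample BT-coex status register btc_params 27 up to 12 times, 5 ms apart,
+ * and report SCO/eSCO traffic as active once more than two samples carry an
+ * (e)SCO packet type.
+ */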
+bool btcoex_is_sco_active(struct net_device *dev)
+{
+	int ioc_res = 0;
+	bool res = false;
+	int sco_id_cnt = 0;
+	int param27;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+
+		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+		WL_TRACE_COEX(("%s, sample[%d], btc params: 27:%x\n",
+			__FUNCTION__, i, param27));
+
+		if (ioc_res < 0) {
+			WL_ERROR(("%s ioc read btc params error\n", __FUNCTION__));
+			break;
+		}
+
+		if ((param27 & 0x6) == 2) { 
+			sco_id_cnt++;
+		}
+
+		if (sco_id_cnt > 2) {
+			WL_TRACE_COEX(("%s, sco/esco detected, pkt id_cnt:%d  samples:%d\n",
+				__FUNCTION__, sco_id_cnt, i));
+			res = true;
+			break;
+		}
+
+		msleep(5);
+	}
+
+	return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+
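+/* Save btc_params registers 50/51/64/65/71 and override them with the
+ * SCO/eSCO coexistence values while trump_sco is set; restore the saved
+ * registers on a later call with trump_sco cleared.
+ */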
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+	static bool saved_status = false;
+
+	char buf_reg50va_dhcp_on[8] = { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+	char buf_reg51va_dhcp_on[8] = { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg64va_dhcp_on[8] = { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg65va_dhcp_on[8] = { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg71va_dhcp_on[8] = { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg50;
+	static uint32 saved_reg51;
+	static uint32 saved_reg64;
+	static uint32 saved_reg65;
+	static uint32 saved_reg71;
+
+	if (trump_sco) { 
+
+		
+		WL_TRACE_COEX(("Do new SCO/eSCO coex algo {save & override} \n"));
+
+
+		if  ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50,  &saved_reg50)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 51,  &saved_reg51)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 64,  &saved_reg64)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 65,  &saved_reg65)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 71,  &saved_reg71))) {
+
+			saved_status = TRUE;
+			WL_TRACE_COEX(("%s saved bt_params[50,51,64,65,71]:"
+				" 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				__FUNCTION__, saved_reg50, saved_reg51,
+				saved_reg64, saved_reg65, saved_reg71));
+
+		} else {
+			WL_ERROR(("%s: save btc_params failed\n",
+				__FUNCTION__));
+			saved_status = false;
+			return -1;
+		}
+
+		WL_TRACE_COEX(("override with [50,51,64,65,71]:"
+			" 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			*(u32 *)(buf_reg50va_dhcp_on+4),
+			*(u32 *)(buf_reg51va_dhcp_on+4),
+			*(u32 *)(buf_reg64va_dhcp_on+4),
+			*(u32 *)(buf_reg65va_dhcp_on+4),
+			*(u32 *)(buf_reg71va_dhcp_on+4)));
+
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg50va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg51va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg64va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg65va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg71va_dhcp_on[0], 8);
+
+		saved_status = true;
+
+	} else if (saved_status) {
+		
+		WL_TRACE_COEX(("Do new SCO/eSCO coex algo {restore}\n"));
+
+		regaddr = 50;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg50);
+		regaddr = 51;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg51);
+		regaddr = 64;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg64);
+		regaddr = 65;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg65);
+		regaddr = 71;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg71);
+
+		WL_TRACE_COEX(("restore bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			saved_reg50, saved_reg51, saved_reg64,
+			saved_reg65, saved_reg71));
+
+		saved_status = false;
+	} else {
+		WL_ERROR(("%s: attempt to restore BTCOEX params that were never saved\n",
+			__FUNCTION__));
+		return -1;
+	}
+	return 0;
+}
+#endif 
+
+
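+/* Private GETPOWER handler: reads WLC_GET_PM and reports "powermode = 1" when
+ * power save is off, "powermode = 0" otherwise.
+ */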
+static int
+wl_iw_get_power_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	int pm_local;
+	char *p = extra;
+
+	error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local));
+	if (!error) {
+		WL_TRACE(("%s: Powermode = %d\n", __func__, pm_local));
+		if (pm_local == PM_OFF)
+			pm_local = 1; 
+		else
+			pm_local = 0; 
+		p += snprintf(p, MAX_WX_STRING, "powermode = %d", pm_local);
+	}
+	else {
+		WL_TRACE(("%s: Error = %d\n", __func__, error));
+		p += snprintf(p, MAX_WX_STRING, "FAIL");
+	}
+	wrqu->data.length = p - extra + 1;
+	return error;
+}
+
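+/* Private BTCOEXMODE handler: "BTCOEXMODE 1" (DHCP start) saves btc_params
+ * 66/41/68 and, if SCO/eSCO traffic is active, applies DHCP-friendly overrides
+ * and arms the BT-coex timer; "BTCOEXMODE 2" (DHCP done) stops the timer,
+ * restores the default btc_flags and the saved btc_params registers.
+ */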
+static int
+wl_iw_set_btcoex_dhcp(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+	char powermode_val = 0;
+	char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+	char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+	char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg66;
+	static uint32 saved_reg41;
+	static uint32 saved_reg68;
+	static bool saved_status = FALSE;
+
+#ifdef COEX_DHCP
+	char buf_flag7_default[8] =   { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif 
+
+	
+	strncpy((char *)&powermode_val, extra + strlen("BTCOEXMODE") +1, 1);
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+		WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+		
+		if ((saved_status == FALSE) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 66,  &saved_reg66)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 41,  &saved_reg41)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 68,  &saved_reg68)))   {
+				saved_status = TRUE;
+				WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+					saved_reg66, saved_reg41, saved_reg68));
+
+				
+
+				
+#ifdef COEX_DHCP
+				
+				if (btcoex_is_sco_active(dev)) {
+					
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg66va_dhcp_on[0],
+						sizeof(buf_reg66va_dhcp_on));
+					
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg41va_dhcp_on[0],
+						sizeof(buf_reg41va_dhcp_on));
+					
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg68va_dhcp_on[0],
+						sizeof(buf_reg68va_dhcp_on));
+					saved_status = TRUE;
+
+					g_bt->bt_state = BT_DHCP_START;
+					g_bt->timer_on = 1;
+					mod_timer(&g_bt->timer, g_bt->timer.expires);
+					WL_TRACE_COEX(("%s enable BT DHCP Timer\n",
+					__FUNCTION__));
+				}
+#endif 
+		}
+		else if (saved_status == TRUE) {
+			WL_ERROR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+		}
+	}
+	else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+		
+
+#ifdef COEX_DHCP
+		
+		WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__));
+		if (g_bt->timer_on) {
+			g_bt->timer_on = 0;
+			del_timer_sync(&g_bt->timer);
+
+			if (g_bt->bt_state != BT_DHCP_IDLE) {
+			
+				WL_TRACE_COEX(("%s bt->bt_state:%d\n",
+					__FUNCTION__, g_bt->bt_state));
+				
+				up(&g_bt->tsk_ctl.sema);
+			}
+		}
+
+		
+		if (saved_status == TRUE)
+			dev_wlc_bufvar_set(dev, "btc_flags",
+				(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+#endif 
+
+		
+		if (saved_status == TRUE) {
+			regaddr = 66;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg66);
+			regaddr = 41;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg41);
+			regaddr = 68;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg68);
+
+			WL_TRACE_COEX(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+				saved_reg66, saved_reg41, saved_reg68));
+		}
+		saved_status = FALSE;
+
+	}
+	else {
+		WL_ERROR(("%s Unknown power setting, ignored\n",
+			__FUNCTION__));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "OK");
+
+	wrqu->data.length = p - extra + 1;
+
+	return error;
+}
+
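+/* Private SETSUSPEND handler: parses the 0/1 flag that follows
+ * SETSUSPEND_CMD, passes it to net_os_set_suspend_disable() and, if the
+ * effective setting changed, applies it with net_os_set_suspend().
+ */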
+static int
+wl_iw_set_suspend(
+struct net_device *dev,
+struct iw_request_info *info,
+union iwreq_data *wrqu,
+char *extra
+)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+	suspend_flag = *(extra + strlen(SETSUSPEND_CMD) + 1) - '0';
+
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+
+	ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+	
+	if (ret_now != suspend_flag) {
+		if (!(ret = net_os_set_suspend(dev, ret_now)))
+			WL_ERROR(("%s: Suspend Flag %d -> %d\n",
+			          __FUNCTION__, ret_now, suspend_flag));
+		else
+			WL_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
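+/* Copy up to 32 SSID bytes into ssid_buf, escaping backslashes and rendering
+ * non-printable bytes as \xNN; returns the formatted string length.
+ */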
+static int
+wl_format_ssid(char* ssid_buf, uint8* ssid, int ssid_len)
+{
+	int i, c;
+	char *p = ssid_buf;
+
+	if (ssid_len > 32) ssid_len = 32;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (int)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += sprintf(p, "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+
+	return p - ssid_buf;
+}
+
+static int
+wl_iw_get_link_speed(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = 0;
+	char *p = extra;
+	static int link_speed;
+
+	
+	net_os_wake_lock(dev);
+	if (g_onoff == G_WLAN_SET_ON) {
+		error = dev_wlc_ioctl(dev, WLC_GET_RATE, &link_speed, sizeof(link_speed));
+		link_speed *= 500000;
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "LinkSpeed %d", link_speed/1000000);
+
+	wrqu->data.length = p - extra + 1;
+
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+static int
+wl_iw_get_dtim_skip(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	char iovbuf[32];
+
+	net_os_wake_lock(dev);
+	if (g_onoff == G_WLAN_SET_ON) {
+
+			memset(iovbuf, 0, sizeof(iovbuf));
+			strcpy(iovbuf, "bcn_li_dtim");
+
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_VAR,
+				&iovbuf, sizeof(iovbuf))) >= 0) {
+
+				p += snprintf(p, MAX_WX_STRING, "Dtim_skip %d", iovbuf[0]);
+				WL_TRACE(("%s: get dtim_skip = %d\n", __FUNCTION__, iovbuf[0]));
+				wrqu->data.length = p - extra + 1;
+			}
+			else
+				WL_ERROR(("%s: get dtim_skip failed code %d\n",
+					__FUNCTION__, error));
+	}
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
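+/* Private DTIM-skip set handler: parses a single-digit value (0-5) following
+ * the command keyword, programs it through the "bcn_li_dtim" iovar and records
+ * it with net_os_set_dtim_skip().
+ */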
+static int
+wl_iw_set_dtim_skip(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	int bcn_li_dtim;
+	char iovbuf[32];
+
+	net_os_wake_lock(dev);
+	if (g_onoff == G_WLAN_SET_ON) {
+
+		bcn_li_dtim = htod32((uint)*(extra + strlen(DTIM_SKIP_SET_CMD) + 1) - '0');
+
+		if ((bcn_li_dtim >= 0) && (bcn_li_dtim <= 5)) {
+
+			memset(iovbuf, 0, sizeof(iovbuf));
+			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+				4, iovbuf, sizeof(iovbuf));
+
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_VAR,
+				&iovbuf, sizeof(iovbuf))) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+
+				
+				net_os_set_dtim_skip(dev, bcn_li_dtim);
+
+				WL_TRACE(("%s: set dtim_skip %d OK\n", __FUNCTION__,
+					bcn_li_dtim));
+				goto exit;
+			}
+			else  WL_ERROR(("%s: set dtim_skip %d failed code %d\n",
+				__FUNCTION__, bcn_li_dtim, error));
+		}
+		else  WL_ERROR(("%s Incorrect dtim_skip setting %d, ignored\n",
+			__FUNCTION__, bcn_li_dtim));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+static int
+wl_iw_get_band(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	static int band;
+
+	net_os_wake_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		error = dev_wlc_ioctl(dev, WLC_GET_BAND, &band, sizeof(band));
+
+		p += snprintf(p, MAX_WX_STRING, "Band %d", band);
+
+		wrqu->data.length = p - extra + 1;
+	}
+
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+static int
+wl_iw_set_band(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	uint band;
+
+	net_os_wake_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_ON) {
+
+		band = htod32((uint)*(extra + strlen(BAND_SET_CMD) + 1) - '0');
+
+		if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+			
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_BAND,
+				&band, sizeof(band))) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+				WL_TRACE(("%s: set band %d OK\n", __FUNCTION__, band));
+				goto exit;
+			} else {
+				WL_ERROR(("%s: set band %d failed code %d\n", __FUNCTION__,
+				          band, error));
+			}
+		} else {
+			WL_ERROR(("%s Incorrect band setting %d, ignored\n", __FUNCTION__, band));
+		}
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+#ifdef PNO_SUPPORT
+
+static int
+wl_iw_set_pno_reset(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+
+	net_os_wake_lock(dev);
+	if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+		if ((error = dhd_dev_pno_reset(dev)) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+				WL_TRACE(("%s: set OK\n", __FUNCTION__));
+				goto exit;
+		}
+		else  WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+
+static int
+wl_iw_set_pno_enable(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error = -1;
+	char *p = extra;
+	int pfn_enabled;
+
+	net_os_wake_lock(dev);
+	pfn_enabled = htod32((uint)*(extra + strlen(PNOENABLE_SET_CMD) + 1) - '0');
+
+	if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) {
+
+		if ((error = dhd_dev_pno_enable(dev, pfn_enabled)) >= 0) {
+				p += snprintf(p, MAX_WX_STRING, "OK");
+				WL_TRACE(("%s: set OK\n", __FUNCTION__));
+				goto exit;
+		}
+		else  WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error));
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "FAIL");
+
+exit:
+	wrqu->data.length = p - extra + 1;
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+
+
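+/* Private PNOSETUP handler: validates the TLV header (prefix, version,
+ * subversion), parses the SSID list, the PNO_TLV_TYPE_TIME scan period and the
+ * optional PNO_TLV_FREQ_REPEAT / PNO_TLV_FREQ_EXPO_MAX fields, then hands the
+ * result to dhd_dev_pno_set().
+ */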
+static int
+wl_iw_set_pno_set(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int res = -1;
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	int nssid = 0;
+	cmd_tlv_t *cmd_tlv_temp;
+	char *str_ptr;
+	int tlv_size_left;
+	int pno_time;
+	int pno_repeat;
+	int pno_freq_expo_max;
+#ifdef PNO_SET_DEBUG
+	int i;
+	char pno_in_example[] = {
+		'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+		'S', '1', '2', '0',
+		'S',
+		0x04,
+		'B', 'R', 'C', 'M',
+		'S',
+		0x04,
+		'G', 'O', 'O', 'G',
+		'T',
+		'1', 'E',
+		'R',
+		'2',
+		'M',
+		'2',
+		0x00
+		};
+#endif 
+
+	net_os_wake_lock(dev);
+	WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	if (wrqu->data.length < (strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))) {
+		WL_ERROR(("%s argument=%d is less than %d\n", __FUNCTION__,
+			wrqu->data.length, strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t)));
+		goto exit_proc;
+	}
+
+#ifdef PNO_SET_DEBUG
+	if (!(extra = kmalloc(sizeof(pno_in_example) +100, GFP_KERNEL))) {
+		res = -ENOMEM;
+		goto exit_proc;
+	}
+	memcpy(extra, pno_in_example, sizeof(pno_in_example));
+	wrqu->data.length = sizeof(pno_in_example);
+	for (i = 0; i < wrqu->data.length; i++)
+		printf("%02X ", extra[i]);
+	printf("\n");
+#endif 
+
+	str_ptr = extra;
+#ifdef PNO_SET_DEBUG
+	str_ptr +=  strlen("PNOSETUP ");
+	tlv_size_left = wrqu->data.length - strlen("PNOSETUP ");
+#else
+	str_ptr +=  strlen(PNOSETUP_SET_CMD);
+	tlv_size_left = wrqu->data.length - strlen(PNOSETUP_SET_CMD);
+#endif
+
+	cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+	pno_repeat = pno_freq_expo_max = 0;
+	
+	if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+		(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+		(cmd_tlv_temp->subver == PNO_TLV_SUBVERSION))
+	{
+		str_ptr += sizeof(cmd_tlv_t);
+		tlv_size_left  -= sizeof(cmd_tlv_t);
+
+		
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+			MAX_PFN_LIST_COUNT,
+			&tlv_size_left)) <= 0) {
+			WL_ERROR(("SSID list is missing or corrupted, ret=%d\n", nssid));
+			goto exit_proc;
+		}
+		else {
+			if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+				WL_ERROR(("%s scan duration corrupted field size %d\n",
+					__FUNCTION__, tlv_size_left));
+				goto exit_proc;
+			}
+			str_ptr++;
+			pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+			WL_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+			
+			if (str_ptr[0] != 0) {
+				if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+					WL_ERROR(("%s pno repeat : corrupted field\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+				WL_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+				if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+					WL_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+				WL_PNO(("%s: pno_freq_expo_max=%d\n",
+					__FUNCTION__, pno_freq_expo_max));
+			}
+		}
+	}
+	else {
+		WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	
+	res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max);
+
+exit_proc:
+	net_os_wake_unlock(dev);
+	return res;
+}
+#endif 
+
+static int
+wl_iw_get_rssi(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	static int rssi = 0;
+	static wlc_ssid_t ssid = {0};
+	int error = 0;
+	char *p = extra;
+	static char ssidbuf[SSID_FMT_BUF_LEN];
+	scb_val_t scb_val;
+
+	net_os_wake_lock(dev);
+
+	bzero(&scb_val, sizeof(scb_val_t));
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		error = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+		if (error) {
+			WL_ERROR(("%s: Fails %d\n", __FUNCTION__, error));
+			net_os_wake_unlock(dev);
+			return error;
+		}
+		rssi = dtoh32(scb_val.val);
+
+		error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
+		if (!error) {
+			ssid.SSID_len = dtoh32(ssid.SSID_len);
+			wl_format_ssid(ssidbuf, ssid.SSID, ssid.SSID_len);
+		}
+	}
+
+	p += snprintf(p, MAX_WX_STRING, "%s rssi %d ", ssidbuf, rssi);
+	wrqu->data.length = p - extra + 1;
+
+	net_os_wake_unlock(dev);
+	return error;
+}
+
+int
+wl_iw_send_priv_event(
+	struct net_device *dev,
+	char *flag
+)
+{
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd;
+
+	cmd = IWEVCUSTOM;
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) >= sizeof(extra))
+		return -1;
+
+	strcpy(extra, flag);
+	wrqu.data.length = strlen(extra);
+	wireless_send_event(dev, cmd, &wrqu, extra);
+	net_os_wake_lock_timeout_enable(dev);
+	WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+	return 0;
+}
+
+
+int
+wl_control_wl_start(struct net_device *dev)
+{
+	wl_iw_t *iw;
+	int ret = 0;
+
+	WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	iw = *(wl_iw_t **)netdev_priv(dev);
+
+	if (!iw) {
+		WL_ERROR(("%s: wl is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	dhd_net_if_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+
+#if defined(BCMLXSDMMC)
+		sdioh_start(NULL, 0);
+#endif
+
+		ret = dhd_dev_reset(dev, 0);
+
+#if defined(BCMLXSDMMC)
+		sdioh_start(NULL, 1);
+#endif
+
+		dhd_dev_init_ioctl(dev);
+
+		g_onoff = G_WLAN_SET_ON;
+	}
+	WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+	dhd_net_if_unlock(dev);
+	return ret;
+}
+
+
+static int
+wl_iw_control_wl_off(
+	struct net_device *dev,
+	struct iw_request_info *info
+)
+{
+	wl_iw_t *iw;
+	int ret = 0;
+
+	WL_TRACE(("Enter %s\n", __FUNCTION__));
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	iw = *(wl_iw_t **)netdev_priv(dev);
+
+	if (!iw) {
+		WL_ERROR(("%s: wl is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	dhd_net_if_lock(dev);
+
+#ifdef SOFTAP
+	ap_cfg_running = FALSE;
+#endif 
+
+	if (g_onoff == G_WLAN_SET_ON) {
+		g_onoff = G_WLAN_SET_OFF;
+
+#if defined(WL_IW_USE_ISCAN)
+		g_iscan->iscan_state = ISCAN_STATE_IDLE;
+#endif 
+
+		dhd_dev_reset(dev, 1);
+
+#if defined(WL_IW_USE_ISCAN)
+#if !defined(CSCAN)
+		
+		wl_iw_free_ss_cache();
+		wl_iw_run_ss_cache_timer(0);
+		
+		g_ss_cache_ctrl.m_link_down = 1;
+#endif 
+		memset(g_scan, 0, G_SCAN_RESULTS);
+		g_scan_specified_ssid = 0;
+#if defined(CONFIG_FIRST_SCAN)
+		
+		g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+		g_first_counter_scans = 0;
+#endif 
+#endif 
+
+#if defined(BCMLXSDMMC)
+		sdioh_stop(NULL);
+#endif
+
+		net_os_set_dtim_skip(dev, 0);
+
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+
+		wl_iw_send_priv_event(dev, "STOP");
+	}
+
+	dhd_net_if_unlock(dev);
+
+	WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+	return ret;
+}
+
+static int
+wl_iw_control_wl_on(
+	struct net_device *dev,
+	struct iw_request_info *info
+)
+{
+	int ret = 0;
+
+	WL_TRACE(("Enter %s \n", __FUNCTION__));
+
+	ret = wl_control_wl_start(dev);
+
+	wl_iw_send_priv_event(dev, "START");
+
+#ifdef SOFTAP
+	if (!ap_fw_loaded) {
+		wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+	}
+#else
+	wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+#endif
+
+	WL_TRACE(("Exited %s\n", __FUNCTION__));
+
+	return ret;
+}
+
+#ifdef SOFTAP
+static struct ap_profile my_ap;
+static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap); 
+static int get_assoc_sta_list(struct net_device *dev, char *buf, int len);
+static int set_ap_mac_list(struct net_device *dev, void *buf);
+
+#define PTYPE_STRING 0
+#define PTYPE_INTDEC 1   
+#define PTYPE_INTHEX 2
+#define PTYPE_STR_HEX 3  
+
+static int get_parameter_from_string(
+	char **str_ptr, const char *token, int param_type, void  *dst, int param_max_len);
+
+static int
+hex2num(char c)
+{
+	if (c >= '0' && c <= '9')
+		return c - '0';
+	if (c >= 'a' && c <= 'f')
+		return c - 'a' + 10;
+	if (c >= 'A' && c <= 'F')
+		return c - 'A' + 10;
+	return -1;
+}
+
+
+
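+/* Convert 2*len hex ASCII characters from txt into len raw bytes in buf;
+ * returns -1 on any non-hex character.
+ */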
+static int
+hstr_2_buf(const char *txt, u8 *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		int a, b;
+
+		a = hex2num(*txt++);
+		if (a < 0)
+			return -1;
+		b = hex2num(*txt++);
+		if (b < 0)
+			return -1;
+		*buf++ = (a << 4) | b;
+	}
+
+	return 0;
+}
+
+
+
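+/* Parse an AP configuration string of KEY=value tokens (ASCII_CMD=AP_CFG,
+ * SSID=, SEC=, KEY=, CHANNEL=, plus optional PREAMBLE=, MAX_SCB=, HIDDEN= and
+ * COUNTRY=) into struct ap_profile; only failures of the first four fields
+ * affect the return value.
+ */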
+static int
+init_ap_profile_from_string(char *param_str, struct ap_profile *ap_cfg)
+{
+	char *str_ptr = param_str;
+	char sub_cmd[16];
+	int ret = 0;
+
+	memset(sub_cmd, 0, sizeof(sub_cmd));
+	memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+	
+	if (get_parameter_from_string(&str_ptr, "ASCII_CMD=",
+		PTYPE_STRING, sub_cmd, SSID_LEN) != 0) {
+		return -1;
+	}
+	if (strncmp(sub_cmd, "AP_CFG", 6)) {
+		WL_ERROR(("ERROR: sub_cmd:%s != 'AP_CFG'!\n", sub_cmd));
+		return -1;
+	}
+
+	
+	
+	ret = get_parameter_from_string(&str_ptr, "SSID=", PTYPE_STRING, ap_cfg->ssid, SSID_LEN);
+
+	ret |= get_parameter_from_string(&str_ptr, "SEC=", PTYPE_STRING,  ap_cfg->sec, SEC_LEN);
+
+	ret |= get_parameter_from_string(&str_ptr, "KEY=", PTYPE_STRING,  ap_cfg->key, KEY_LEN);
+
+	ret |= get_parameter_from_string(&str_ptr, "CHANNEL=", PTYPE_INTDEC, &ap_cfg->channel, 5);
+
+	
+	get_parameter_from_string(&str_ptr, "PREAMBLE=", PTYPE_INTDEC, &ap_cfg->preamble, 5);
+
+	
+	get_parameter_from_string(&str_ptr, "MAX_SCB=", PTYPE_INTDEC,  &ap_cfg->max_scb, 5);
+
+	
+	get_parameter_from_string(&str_ptr, "HIDDEN=",
+		PTYPE_INTDEC,  &ap_cfg->closednet, 5);
+
+	
+	get_parameter_from_string(&str_ptr, "COUNTRY=",
+		PTYPE_STRING,  &ap_cfg->country_code, 3);
+
+	return ret;
+}
+#endif 
+
+
+
+#ifdef SOFTAP
+static int
+iwpriv_set_ap_config(struct net_device *dev,
+            struct iw_request_info *info,
+            union iwreq_data *wrqu,
+            char *ext)
+{
+	int res = 0;
+	char  *extra = NULL;
+	struct ap_profile *ap_cfg = &my_ap;
+
+	WL_TRACE(("> Got IWPRIV SET_AP IOCTL: info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+		info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		char *str_ptr;
+
+		if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		extra[wrqu->data.length] = 0;
+		WL_SOFTAP((" Got str param in iw_point:\n %s\n", extra));
+
+		memset(ap_cfg, 0, sizeof(struct ap_profile));
+
+		
+
+		str_ptr = extra;
+
+		if ((res = init_ap_profile_from_string(extra, ap_cfg)) < 0) {
+			WL_ERROR(("%s failed to parse %d\n", __FUNCTION__, res));
+			kfree(extra);
+			return -1;
+		}
+
+	} else {
+		WL_ERROR(("IWPRIV argument len = 0\n"));
+		return -1;
+	}
+
+	if ((res = set_ap_cfg(dev, ap_cfg)) < 0)
+		WL_ERROR(("%s failed to set_ap_cfg %d\n", __FUNCTION__, res));
+
+	kfree(extra);
+
+	return res;
+}
+#endif 
+
+
+
+#ifdef SOFTAP
+static int iwpriv_get_assoc_list(struct net_device *dev,
+        struct iw_request_info *info,
+        union iwreq_data *p_iwrq,
+        char *extra)
+{
+	int i, ret = 0;
+	char mac_buf[256];
+	struct maclist *sta_maclist = (struct maclist *)mac_buf;
+
+	char mac_lst[384];
+	char *p_mac_str;
+	char *p_mac_str_end;
+	wl_iw_t *iw;
+
+	if ((!dev) || (!extra)) {
+		
+		return -EINVAL;
+	}
+
+	iw = *(wl_iw_t **)netdev_priv(dev);
+
+	MUTEX_LOCK_SOFTAP_SET(iw->pub);
+	DHD_OS_WAKE_LOCK(iw->pub);
+
+	WL_TRACE(("\n %s: IWPRIV IOCTL: cmd:%hx, flags:%hx, extra:%p, iwp.len:%d, "
+		"iwp.ptr:%p, iwp.flags:%x\n", __FUNCTION__, info->cmd, info->flags,
+		extra, p_iwrq->data.length, p_iwrq->data.pointer, p_iwrq->data.flags));
+
+	memset(sta_maclist, 0, sizeof(mac_buf));
+
+	sta_maclist->count = 8;
+
+	WL_SOFTAP(("%s: net device:%s, buf_sz:%d\n",
+		__FUNCTION__, dev->name, sizeof(mac_buf)));
+
+	if ((ret = get_assoc_sta_list(dev, mac_buf, sizeof(mac_buf))) < 0) {
+		WL_ERROR(("%s: sta list ioctl error:%d\n",
+			__FUNCTION__, ret));
+		goto func_exit;
+	}
+
+	WL_SOFTAP(("%s: got %d stations\n", __FUNCTION__,
+		sta_maclist->count));
+
+	
+	memset(mac_lst, 0, sizeof(mac_lst));
+	p_mac_str = mac_lst;
+	p_mac_str_end = &mac_lst[sizeof(mac_lst)-1];
+
+	for (i = 0; i < 8; i++) { 
+		struct ether_addr * id = &sta_maclist->ea[i];
+		if (!ETHER_ISNULLADDR(id->octet)) {
+			scb_val_t scb_val;
+			int rssi = 0;
+			bzero(&scb_val, sizeof(scb_val_t));
+
+			
+			if ((p_mac_str_end - p_mac_str) <= 36) {
+				WL_ERROR(("%s: mac list buf has fewer than 36 bytes left for item[%i]\n",
+					__FUNCTION__, i));
+				break;
+			}
+
+			p_mac_str += snprintf(p_mac_str, MAX_WX_STRING,
+			"\nMac[%d]=%02X:%02X:%02X:%02X:%02X:%02X,", i,
+			id->octet[0], id->octet[1], id->octet[2],
+			id->octet[3], id->octet[4], id->octet[5]);
+
+			
+			bcopy(id->octet, &scb_val.ea, 6);
+			ret = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+			if (ret  < 0) {
+				snprintf(p_mac_str, MAX_WX_STRING, "RSSI:ERR");
+				WL_ERROR(("%s: RSSI ioctl error:%d\n",
+					__FUNCTION__, ret));
+				break;
+			}
+
+			rssi = dtoh32(scb_val.val);
+			p_mac_str += snprintf(p_mac_str, MAX_WX_STRING,
+			"RSSI:%d", rssi);
+		}
+	}
+
+	p_iwrq->data.length = strlen(mac_lst)+1; 
+
+	WL_SOFTAP(("%s: data to user:\n%s\n usr_ptr:%p\n", __FUNCTION__,
+		mac_lst, p_iwrq->data.pointer));
+
+	if (p_iwrq->data.length) {
+		bcopy(mac_lst, extra, p_iwrq->data.length);
+	}
+
+func_exit:
+	DHD_OS_WAKE_UNLOCK(iw->pub);
+	MUTEX_UNLOCK_SOFTAP_SET(iw->pub);
+
+	WL_SOFTAP(("%s: Exited\n", __FUNCTION__));
+	return ret;
+}
+#endif 
+
+
+#ifdef SOFTAP
+
+#define MAC_FILT_MAX 8
+static int iwpriv_set_mac_filters(struct net_device *dev,
+        struct iw_request_info *info,
+        union iwreq_data *wrqu,
+        char *ext)
+{
+	int i, ret = -1;
+	char  * extra = NULL;
+	int mac_cnt = 0; 
+	int mac_mode = 0;
+	struct ether_addr *p_ea;
+	struct mac_list_set mflist_set; 
+
+	WL_SOFTAP((">>> Got IWPRIV SET_MAC_FILTER IOCTL:  info->cmd:%x,"
+			"info->flags:%x, u.data:%p, u.len:%d\n",
+			info->cmd, info->flags,
+			wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		char *str_ptr;
+
+		if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		extra[wrqu->data.length] = 0;
+		WL_SOFTAP((" Got parameter string in iw_point:\n %s \n", extra));
+
+		memset(&mflist_set, 0, sizeof(mflist_set));
+
+		
+		str_ptr = extra;
+
+
+		
+		if (get_parameter_from_string(&str_ptr, "MAC_MODE=",
+			PTYPE_INTDEC, &mac_mode, 4) != 0) {
+			WL_ERROR(("ERROR: 'MAC_MODE=' token is missing\n"));
+			goto exit_proc;
+		}
+
+		p_ea = &mflist_set.mac_list.ea[0];
+
+		if (get_parameter_from_string(&str_ptr, "MAC_CNT=",
+			PTYPE_INTDEC, &mac_cnt, 4) != 0) {
+			WL_ERROR(("ERROR: 'MAC_CNT=' token param is missing \n"));
+			goto exit_proc;
+		}
+
+		if (mac_cnt > MAC_FILT_MAX) {
+			WL_ERROR(("ERROR: number of MAC filters > MAX\n"));
+			goto exit_proc;
+		}
+
+		for (i = 0; i < mac_cnt; i++) {
+			if (get_parameter_from_string(&str_ptr, "MAC=",
+				PTYPE_STR_HEX, &p_ea[i], 12) != 0) {
+				WL_ERROR(("ERROR: MAC_filter[%d] is missing!\n", i));
+				goto exit_proc;
+			}
+		}
+
+		WL_SOFTAP(("MAC_MODE=:%d, MAC_CNT=%d, MACs:..\n", mac_mode, mac_cnt));
+		for (i = 0; i < mac_cnt; i++) {
+		   WL_SOFTAP(("mac_filt[%d]:", i));
+		   dhd_print_buf(&p_ea[i], 6, 0);
+		}
+
+		
+		mflist_set.mode = mac_mode;
+		mflist_set.mac_list.count = mac_cnt;
+		set_ap_mac_list(dev, &mflist_set);
+
+		
+		wrqu->data.pointer = NULL;
+		wrqu->data.length = 0;
+		ret = 0;
+
+	} else {
+		WL_ERROR(("IWPRIV argument len is 0\n"));
+		return -1;
+	}
+
+	exit_proc:
+	kfree(extra);
+	return ret;
+}
+#endif 
+
+
+#ifdef SOFTAP
+
+static int iwpriv_set_ap_sta_disassoc(struct net_device *dev,
+        struct iw_request_info *info,
+        union iwreq_data *wrqu,
+        char *ext)
+{
+	int res = 0;
+	char sta_mac[6] = {0, 0, 0, 0, 0, 0};
+	char cmd_buf[256];
+	char *str_ptr = cmd_buf;
+
+	WL_SOFTAP((">>%s called\n args: info->cmd:%x,"
+		" info->flags:%x, u.data.p:%p, u.data.len:%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		if (copy_from_user(cmd_buf, wrqu->data.pointer, wrqu->data.length)) {
+			return -EFAULT;
+		}
+
+		if (get_parameter_from_string(&str_ptr,
+			"MAC=", PTYPE_STR_HEX, sta_mac, 12) == 0) {
+			res = wl_iw_softap_deassoc_stations(dev, sta_mac);
+		} else  {
+			WL_ERROR(("ERROR: STA_MAC= token not found\n"));
+		}
+	}
+
+	return res;
+}
+#endif 
+
+#endif 
+
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+	__u16		cmd;		
+	__u16		flags;		
+};
+
+typedef int (*iw_handler)(struct net_device *dev,
+                struct iw_request_info *info,
+                void *wrqu,
+                char *extra);
+#endif 
+
+static int
+wl_iw_config_commit(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *zwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct sockaddr bssid;
+
+	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+		return error;
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	if (!ssid.SSID_len)
+		return 0;
+
+	bzero(&bssid, sizeof(struct sockaddr));
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __FUNCTION__, ssid.SSID));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_name(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	char *cwrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+	strcpy(cwrq, "IEEE 802.11-DS");
+
+	return 0;
+}
+
+static int
+wl_iw_set_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error, chan;
+	uint sf = 0;
+
+	WL_TRACE(("%s %s: SIOCSIWFREQ\n", __FUNCTION__, dev->name));
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("%s:>> not executed, 'SOFT_AP is active' \n", __FUNCTION__));
+		return 0;
+	}
+#endif
+
+	
+	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+		chan = fwrq->m;
+	}
+	
+	else {
+		
+		if (fwrq->e >= 6) {
+			fwrq->e -= 6;
+			while (fwrq->e--)
+				fwrq->m *= 10;
+		} else if (fwrq->e < 6) {
+			while (fwrq->e++ < 6)
+				fwrq->m /= 10;
+		}
+		
+		if (fwrq->m > 4000 && fwrq->m < 5000)
+			sf = WF_CHAN_FACTOR_4_G; 
+
+		chan = wf_mhz2channel(fwrq->m, sf);
+	}
+
+	chan = htod32(chan);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+		return error;
+
+	g_wl_iw_params.target_channel = chan;
+
+	
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+
+	
+	fwrq->m = dtoh32(ci.hw_channel);
+	fwrq->e = dtoh32(0);
+	return 0;
+}
+
+static int
+wl_iw_set_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int infra = 0, ap = 0, error = 0;
+
+	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+	switch (*uwrq) {
+	case IW_MODE_MASTER:
+		infra = ap = 1;
+		break;
+	case IW_MODE_ADHOC:
+	case IW_MODE_AUTO:
+		break;
+	case IW_MODE_INFRA:
+		infra = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	infra = htod32(infra);
+	ap = htod32(ap);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+		return error;
+
+	
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int error, infra = 0, ap = 0;
+
+	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+		return error;
+
+	infra = dtoh32(infra);
+	ap = dtoh32(ap);
+	*uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+	*uwrq = infra ? (ap ? IW_MODE_MASTER : IW_MODE_INFRA) : IW_MODE_ADHOC;
+	return 0;
+}
+
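+/* SIOCGIWRANGE handler: fills struct iw_range with the valid channel list,
+ * quality limits, the current rate set (extended with 11n rates when nmode is
+ * enabled on an SSN PHY), encryption key sizes, retry limits and WE event
+ * capabilities.
+ */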
+static int
+wl_iw_get_range(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	struct iw_range *range = (struct iw_range *) extra;
+	wl_uint32_list_t *list;
+	wl_rateset_t rateset;
+	int8 *channels;
+	int error, i, k;
+	uint sf, ch;
+
+	int phytype;
+	int bw_cap = 0, sgi_tx = 0, nmode = 0;
+	channel_info_t ci;
+	uint8 nrate_list2copy = 0;
+	uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+		{14, 29, 43, 58, 87, 116, 130, 144},
+		{27, 54, 81, 108, 162, 216, 243, 270},
+		{30, 60, 90, 120, 180, 240, 270, 300}};
+
+	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	channels = kmalloc((MAXCHANNEL+1)*4, GFP_KERNEL);
+	if (!channels) {
+		WL_ERROR(("Could not alloc channels\n"));
+		return -ENOMEM;
+	}
+	list = (wl_uint32_list_t *)channels;
+
+	dwrq->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(*range));
+
+	
+	range->min_nwid = range->max_nwid = 0;
+
+	
+	list->count = htod32(MAXCHANNEL);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, (MAXCHANNEL+1)*4))) {
+		kfree(channels);
+		return error;
+	}
+	for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+		range->freq[i].i = dtoh32(list->element[i]);
+
+		ch = dtoh32(list->element[i]);
+		if (ch <= CH_MAX_2G_CHANNEL)
+			sf = WF_CHAN_FACTOR_2_4_G;
+		else
+			sf = WF_CHAN_FACTOR_5_G;
+
+		range->freq[i].m = wf_channel2mhz(ch, sf);
+		range->freq[i].e = 6;
+	}
+	range->num_frequency = range->num_channels = i;
+
+	
+	range->max_qual.qual = 5;
+	
+	range->max_qual.level = 0x100 - 200;	
+	
+	range->max_qual.noise = 0x100 - 200;	
+	
+	range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+	
+	range->avg_qual.qual = 3;
+	
+	range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+	
+	range->avg_qual.noise = 0x100 - 75;	
+#endif 
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) {
+		kfree(channels);
+		return error;
+	}
+	rateset.count = dtoh32(rateset.count);
+	range->num_bitrates = rateset.count;
+	for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+		range->bitrate[i] = (rateset.rates[i]& 0x7f) * 500000; 
+	dev_wlc_intvar_get(dev, "nmode", &nmode);
+	dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype));
+
+	if (nmode == 1 && phytype == WLC_PHY_TYPE_SSN) {
+		dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap);
+		dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx);
+		dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t));
+		ci.hw_channel = dtoh32(ci.hw_channel);
+
+		if (bw_cap == 0 ||
+			(bw_cap == 2 && ci.hw_channel <= 14)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 0;
+			else
+				nrate_list2copy = 1;
+		}
+		if (bw_cap == 1 ||
+			(bw_cap == 2 && ci.hw_channel >= 36)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 2;
+			else
+				nrate_list2copy = 3;
+		}
+		range->num_bitrates += 8;
+		for (k = 0; i < range->num_bitrates; k++, i++) {
+			
+			range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+		}
+	}
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) {
+		kfree(channels);
+		return error;
+	}
+	i = dtoh32(i);
+	if (i == WLC_PHY_TYPE_A)
+		range->throughput = 24000000;	
+	else
+		range->throughput = 1500000;	
+
+	
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+	range->num_encoding_sizes = 4;
+	range->encoding_size[0] = WEP1_KEY_SIZE;
+	range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+	range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+	range->encoding_size[2] = 0;
+#endif
+	range->encoding_size[3] = AES_KEY_SIZE;
+
+	
+	range->min_pmp = 0;
+	range->max_pmp = 0;
+	range->min_pmt = 0;
+	range->max_pmt = 0;
+	range->pmp_flags = 0;
+	range->pm_capa = 0;
+
+	
+	range->num_txpower = 2;
+	range->txpower[0] = 1;
+	range->txpower[1] = 255;
+	range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 19;
+
+	
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->r_time_flags = 0;
+	
+	range->min_retry = 1;
+	range->max_retry = 255;
+	
+	range->min_r_time = 0;
+	range->max_r_time = 0;
+#endif 
+
+#if WIRELESS_EXT > 17
+	range->enc_capa = IW_ENC_CAPA_WPA;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+	range->enc_capa |= IW_ENC_CAPA_WPA2;
+
+	
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+	
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+#endif 
+
+	kfree(channels);
+
+	return 0;
+}
+
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		return 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		return 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		return 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		return 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		return 4;
+	else
+		return 5;
+}
+
+static int
+wl_iw_set_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	int i;
+
+	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+	for (i = 0; i < iw->spy_num; i++)
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+	return 0;
+}
+
+static int
+wl_iw_get_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = iw->spy_num;
+	for (i = 0; i < iw->spy_num; i++) {
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		addr[i].sa_family = AF_UNIX;
+		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+		iw->spy_qual[i].updated = 0;
+	}
+
+	return 0;
+}
+
+
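+/* When a target channel is set, restrict the join request to that single
+ * 20 MHz chanspec (band, bandwidth and sideband bits filled in) and grow
+ * *join_params_size to cover the association parameters.
+ */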
+static int
+wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params, int *join_params_size)
+{
+	chanspec_t chanspec = 0;
+
+	if (ch != 0) {
+		
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		
+		join_params->params.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+		        htodchanspec(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num = htod32(join_params->params.chanspec_num);
+
+		WL_TRACE(("%s  join_params->params.chanspec_list[0]= %X\n",
+			__FUNCTION__, join_params->params.chanspec_list[0]));
+	}
+	return 1;
+}
+
+static int
+wl_iw_set_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	int error = -EINVAL;
+	wl_join_params_t join_params;
+	int join_params_size;
+
+	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+	if (awrq->sa_family != ARPHRD_ETHER) {
+		WL_ERROR(("Invalid Header...sa_family\n"));
+		return -EINVAL;
+	}
+
+	
+	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+		scb_val_t scbval;
+		
+		bzero(&scbval, sizeof(scb_val_t));
+		
+		(void) dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+		return 0;
+	}
+
+
+	
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
+	memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+
+	
+	
+	WL_TRACE(("%s  target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel));
+	wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) {
+		WL_ERROR(("%s Invalid ioctl data=%d\n", __FUNCTION__, error));
+		return error;
+	}
+
+	if (g_ssid.SSID_len) {
+		WL_TRACE(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__,
+			g_ssid.SSID, MAC2STR((u8 *)awrq->sa_data),
+			g_wl_iw_params.target_channel));
+	}
+
+	
+	memset(&g_ssid, 0, sizeof(g_ssid));
+	return 0;
+}
+
+static int
+wl_iw_get_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+	awrq->sa_family = ARPHRD_ETHER;
+	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+	
+	(void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	struct iw_mlme *mlme;
+	scb_val_t scbval;
+	int error  = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name));
+
+	mlme = (struct iw_mlme *)extra;
+	if (mlme == NULL) {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	scbval.val = mlme->reason_code;
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+	if (mlme->cmd == IW_MLME_DISASSOC) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+	}
+	else if (mlme->cmd == IW_MLME_DEAUTH) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+			sizeof(scb_val_t));
+	}
+	else {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	return error;
+}
+#endif 
+
+#ifndef WL_IW_USE_ISCAN
+static int
+wl_iw_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int error, i;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+	if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+		          __FUNCTION__, list->version));
+		kfree(list);
+		return -EINVAL;
+	}
+
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif 
+
+		dwrq->length++;
+	}
+
+	kfree(list);
+
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+#endif 
+
+#ifdef WL_IW_USE_ISCAN
+static int
+wl_iw_iscan_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	iscan_buf_t * buf;
+	iscan_info_t *iscan = g_iscan;
+
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) {
+		WL_ERROR(("%s error\n", __FUNCTION__));
+		return 0;
+	}
+
+	buf = iscan->list_hdr;
+	
+	while (buf) {
+		list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+		if (list->version != WL_BSS_INFO_VERSION) {
+			WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+				__FUNCTION__, list->version));
+			return -EINVAL;
+		}
+
+		bi = NULL;
+		for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+			bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+			          : list->bss_info;
+			ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+				WLC_IW_ISCAN_MAXLEN));
+
+			
+			if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+				continue;
+
+			
+			memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+			addr[dwrq->length].sa_family = ARPHRD_ETHER;
+			qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+			qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+			qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+			
+#if WIRELESS_EXT > 18
+			qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+			qual[dwrq->length].updated = 7;
+#endif 
+
+			dwrq->length++;
+		}
+		buf = buf->next;
+	}
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+
+#if defined(CONFIG_FIRST_SCAN)
+	
+	if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+		params->passive_time = 30;
+#endif 
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+	return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+	int err = 0;
+
+	iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+	iscan->iscan_ex_params_p->action = htod16(action);
+	iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+	WL_SCAN(("%s : nprobes=%d\n", __FUNCTION__, iscan->iscan_ex_params_p->params.nprobes));
+	WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+	WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+	WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+	WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+	WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type));
+
+	if ((err = dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p,
+		iscan->iscan_ex_param_size, iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+			WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+			err = -1;
+	}
+
+	return err;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+	iscan_info_t *iscan = (iscan_info_t *)data;
+	if (iscan) {
+		iscan->timer_on = 0;
+		if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+			WL_TRACE(("timer trigger\n"));
+			up(&iscan->tsk_ctl.sema);
+		}
+	}
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	
+
+	dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+		iovbuf, sizeof(iovbuf));
+}
+
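+/* Fetch the next chunk of incremental-scan results into a (possibly newly
+ * allocated) iscan buffer via the "iscanresults" iovar and return the scan
+ * status reported by the firmware.
+ */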
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+	iscan_buf_t * buf;
+	iscan_buf_t * ptr;
+	wl_iscan_results_t * list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	uint32 status;
+	int res = 0;
+
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+	if (iscan->list_cur) {
+		buf = iscan->list_cur;
+		iscan->list_cur = buf->next;
+	}
+	else {
+		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+		if (!buf) {
+			WL_ERROR(("%s can't alloc iscan_buf_t : going to abort current iscan\n",
+			          __FUNCTION__));
+			DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+			return WL_SCAN_RESULTS_NO_MEM;
+		}
+		buf->next = NULL;
+		if (!iscan->list_hdr)
+			iscan->list_hdr = buf;
+		else {
+			ptr = iscan->list_hdr;
+			while (ptr->next) {
+				ptr = ptr->next;
+			}
+			ptr->next = buf;
+		}
+	}
+	memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	res = dev_iw_iovar_getbuf(
+		iscan->dev,
+		"iscanresults",
+		&list,
+		WL_ISCAN_RESULTS_FIXED_SIZE,
+		buf->iscan_buf,
+		WLC_IW_ISCAN_MAXLEN);
+	if (res == 0) {
+		results->buflen = dtoh32(results->buflen);
+		results->version = dtoh32(results->version);
+		results->count = dtoh32(results->count);
+		WL_TRACE(("results->count = %d\n", results->count));
+		WL_TRACE(("results->buflen = %d\n", results->buflen));
+		status = dtoh32(list_buf->status);
+	} else {
+		WL_ERROR(("%s returns error %d\n", __FUNCTION__, res));
+		
+		status = WL_SCAN_RESULTS_NO_MEM;
+	}
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+	return status;
+}
+
+static void
+wl_iw_force_specific_scan(iscan_info_t *iscan)
+{
+	WL_TRACE(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_lock();
+#endif
+
+	(void) dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_unlock();
+#endif
+}
+
+static void
+wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+#ifndef SANDGATE2G
+	union iwreq_data wrqu;
+
+	memset(&wrqu, 0, sizeof(wrqu));
+
+	
+	wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+#if defined(CONFIG_FIRST_SCAN)
+	if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+		g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_READY;
+#endif 
+	WL_TRACE(("Send Event ISCAN complete\n"));
+#endif 
+}
+
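+/* Kernel thread driving the incremental scan state machine: each wake-up pulls
+ * pending results with wl_iw_iscan_get() and, depending on the status, either
+ * continues the scan, re-arms the polling timer, reports completion to user
+ * space, or falls back to a forced specific scan after an abort.
+ */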
+static int
+_iscan_sysioc_thread(void *data)
+{
+	uint32 status;
+
+	tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data;
+	iscan_info_t *iscan = (iscan_info_t *) tsk_ctl->parent;
+
+
+	static bool iscan_pass_abort = FALSE;
+
+	DAEMONIZE("iscan_sysioc");
+
+	status = WL_SCAN_RESULTS_PARTIAL;
+
+	
+	complete(&tsk_ctl->completed);
+
+	while (down_interruptible(&tsk_ctl->sema) == 0) {
+
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk_ctl->terminated) {
+			break;
+		}
+#if defined(SOFTAP)
+		
+		if (ap_cfg_running) {
+			WL_TRACE(("%s skipping SCAN ops in AP mode !!!\n", __FUNCTION__));
+			net_os_wake_unlock(iscan->dev);
+			continue;
+		}
+#endif 
+
+		if (iscan->timer_on) {
+			
+			iscan->timer_on = 0;
+			del_timer_sync(&iscan->timer);
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_lock();
+#endif
+		status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_unlock();
+#endif
+
+		if (g_scan_specified_ssid && (iscan_pass_abort == TRUE)) {
+			WL_TRACE(("%s Get results from specific scan status=%d\n",
+				__FUNCTION__, status));
+			wl_iw_send_scan_complete(iscan);
+			iscan_pass_abort = FALSE;
+			status = -1;
+		}
+
+		switch (status) {
+			case WL_SCAN_RESULTS_PARTIAL:
+				WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_lock();
+#endif
+				
+				wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_unlock();
+#endif
+				
+				mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_SUCCESS:
+				WL_TRACE(("iscanresults complete\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			case WL_SCAN_RESULTS_PENDING:
+				WL_TRACE(("iscanresults pending\n"));
+				
+				mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_ABORTED:
+				WL_TRACE(("iscanresults aborted\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				if (g_scan_specified_ssid == 0)
+					wl_iw_send_scan_complete(iscan);
+				else {
+					iscan_pass_abort = TRUE;
+					wl_iw_force_specific_scan(iscan);
+				}
+				break;
+			case WL_SCAN_RESULTS_NO_MEM:
+				WL_TRACE(("iscanresults can't alloc memory: skip\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				break;
+			default:
+				WL_TRACE(("iscanresults returned unknown status %d\n", status));
+				break;
+		 }
+
+		net_os_wake_unlock(iscan->dev);
+	}
+
+	if (iscan->timer_on) {
+		iscan->timer_on = 0;
+		del_timer_sync(&iscan->timer);
+	}
+	complete_and_exit(&tsk_ctl->completed, 0);
+}
+#endif 
+
+#if !defined(CSCAN)
+
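+/* Timer callback: flag the specific-scan cache as expired so it is
+ * flushed on the next scan request.
+ */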
+static void
+wl_iw_set_ss_cache_timer_flag(void)
+{
+	g_ss_cache_ctrl.m_timer_expired = 1;
+	WL_TRACE(("%s called\n", __FUNCTION__));
+}
+
+
+static int
+wl_iw_init_ss_cache_ctrl(void)
+{
+	WL_TRACE(("%s :\n", __FUNCTION__));
+	g_ss_cache_ctrl.m_prev_scan_mode = 0;
+	g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+	g_ss_cache_ctrl.m_cache_head = NULL;
+	g_ss_cache_ctrl.m_link_down = 0;
+	g_ss_cache_ctrl.m_timer_expired = 0;
+	memset(g_ss_cache_ctrl.m_active_bssid, 0, ETHER_ADDR_LEN);
+
+	g_ss_cache_ctrl.m_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
+	if (!g_ss_cache_ctrl.m_timer) {
+		return -ENOMEM;
+	}
+	init_timer(g_ss_cache_ctrl.m_timer);
+	g_ss_cache_ctrl.m_timer->function = (void *)wl_iw_set_ss_cache_timer_flag;
+
+	return 0;
+}
+
+
+
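+/* Free every node of the specific-scan cache list under wl_cache_lock. */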
+static void
+wl_iw_free_ss_cache(void)
+{
+	wl_iw_ss_cache_t *node, *cur;
+	wl_iw_ss_cache_t **spec_scan_head;
+
+	WL_TRACE(("%s called\n", __FUNCTION__));
+
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+	node = *spec_scan_head;
+
+	for (;node;) {
+		WL_TRACE(("%s : SSID - %s\n", __FUNCTION__, node->bss_info->SSID));
+		cur = node;
+		node = cur->next;
+		kfree(cur);
+	}
+	*spec_scan_head = NULL;
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+}
+
+
+
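+/* Start (kick_off != 0) or stop the specific-scan cache expiry timer. */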
+static int
+wl_iw_run_ss_cache_timer(int kick_off)
+{
+	struct timer_list **timer;
+
+	timer = &g_ss_cache_ctrl.m_timer;
+
+	if (*timer) {
+		if (kick_off) {
+#ifdef CONFIG_PRESCANNED
+			(*timer)->expires = jiffies + 70000 * HZ / 1000;
+#else
+			(*timer)->expires = jiffies + 30000 * HZ / 1000;	
+#endif
+			add_timer(*timer);
+			WL_TRACE(("%s : timer starts \n", __FUNCTION__));
+		} else {
+			del_timer_sync(*timer);
+			WL_TRACE(("%s : timer stops \n", __FUNCTION__));
+		}
+	}
+
+	return 0;
+}
+
+
+static void
+wl_iw_release_ss_cache_ctrl(void)
+{
+	WL_TRACE(("%s :\n", __FUNCTION__));
+	wl_iw_free_ss_cache();
+	wl_iw_run_ss_cache_timer(0);
+	if (g_ss_cache_ctrl.m_timer) {
+		kfree(g_ss_cache_ctrl.m_timer);
+	}
+}
+
+
+
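+/* Drop cache entries that were not re-marked dirty by the latest scan and
+ * clear the dirty flag on the entries that are kept.
+ */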
+static void
+wl_iw_reset_ss_cache(void)
+{
+	wl_iw_ss_cache_t *node, *prev, *cur;
+	wl_iw_ss_cache_t **spec_scan_head;
+
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+	node = *spec_scan_head;
+	prev = node;
+
+	for (;node;) {
+		WL_TRACE(("%s : node SSID %s \n", __FUNCTION__, node->bss_info->SSID));
+		if (!node->dirty) {
+			cur = node;
+			if (cur == *spec_scan_head) {
+				*spec_scan_head = cur->next;
+				prev = *spec_scan_head;
+			}
+			else {
+				prev->next = cur->next;
+			}
+			node = cur->next;
+
+			WL_TRACE(("%s : Del node : SSID %s\n", __FUNCTION__, cur->bss_info->SSID));
+			kfree(cur);
+			continue;
+		}
+
+		node->dirty = 0;
+		prev = node;
+		node = node->next;
+	}
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+}
+
+
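+/* Merge the BSS entries of a scan result list into the specific-scan cache:
+ * known BSSIDs are re-marked dirty, new ones are appended to the list.
+ */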
+static int
+wl_iw_add_bss_to_ss_cache(wl_scan_results_t *ss_list)
+{
+
+	wl_iw_ss_cache_t *node, *prev, *leaf;
+	wl_iw_ss_cache_t **spec_scan_head;
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	
+	if (!ss_list->count) {
+		return 0;
+	}
+
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+
+	for (i = 0; i < ss_list->count; i++) {
+
+		node = *spec_scan_head;
+		prev = node;
+
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+
+		WL_TRACE(("%s : find %d with specific SSID %s\n", __FUNCTION__, i, bi->SSID));
+		for (;node;) {
+			if (!memcmp(&node->bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+				
+				WL_TRACE(("dirty marked : SSID %s\n", bi->SSID));
+				node->dirty = 1;
+				break;
+			}
+			prev = node;
+			node = node->next;
+		}
+
+		if (node) {
+			continue;
+		}
+
+		leaf = kmalloc(bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL);
+		if (!leaf) {
+			WL_ERROR(("Memory alloc failure %d\n",
+				bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN));
+			DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+			return -ENOMEM;
+		}
+
+		memcpy(leaf->bss_info, bi, bi->length);
+		leaf->next = NULL;
+		leaf->dirty = 1;
+		leaf->count = 1;
+		leaf->version = ss_list->version;
+
+		if (!prev) {
+			*spec_scan_head = leaf;
+		}
+		else {
+			prev->next = leaf;
+		}
+	}
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+	return 0;
+}
+
+
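+/* Append the cached specific-scan entries to the WEXT scan-result buffer,
+ * limited to buflen_from_user bytes.
+ */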
+static int
+wl_iw_merge_scan_cache(struct iw_request_info *info, char *extra, uint buflen_from_user,
+__u16 *merged_len)
+{
+	wl_iw_ss_cache_t *node;
+	wl_scan_results_t *list_merge;
+
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+	node = g_ss_cache_ctrl.m_cache_head;
+	for (;node;) {
+		list_merge = (wl_scan_results_t *)&node->buflen;
+		WL_TRACE(("%s: Cached Specific APs list=%d\n", __FUNCTION__, list_merge->count));
+		if (buflen_from_user - *merged_len > 0) {
+			*merged_len += (__u16) wl_iw_get_scan_prep(list_merge, info,
+				extra + *merged_len, buflen_from_user - *merged_len);
+		}
+		else {
+			WL_TRACE(("%s: exit with break\n", __FUNCTION__));
+			break;
+		}
+		node = node->next;
+	}
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+	return 0;
+}
+
+
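+/* Remove the cache entry matching the given BSSID (used when the link to
+ * the active AP goes down) and clear the address.
+ */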
+static int
+wl_iw_delete_bss_from_ss_cache(void *addr)
+{
+
+	wl_iw_ss_cache_t *node, *prev;
+	wl_iw_ss_cache_t **spec_scan_head;
+
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+	spec_scan_head = &g_ss_cache_ctrl.m_cache_head;
+	node = *spec_scan_head;
+	prev = node;
+	for (;node;) {
+		if (!memcmp(&node->bss_info->BSSID, addr, ETHER_ADDR_LEN)) {
+			if (node == *spec_scan_head) {
+				*spec_scan_head = node->next;
+			}
+			else {
+				prev->next = node->next;
+			}
+
+			WL_TRACE(("%s : Del node : %s\n", __FUNCTION__, node->bss_info->SSID));
+			kfree(node);
+			break;
+		}
+
+		prev = node;
+		node = node->next;
+	}
+
+	memset(addr, 0, ETHER_ADDR_LEN);
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+	return 0;
+}
+
+#endif	
+
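+/* SIOCSIWSCAN handler for the non-iscan path: issues a one-shot WLC_SCAN,
+ * optionally restricted to the ESSID supplied by user space.
+ */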
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int error;
+	WL_TRACE(("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __FUNCTION__, dev->name));
+
+#ifdef OEM_CHROMIUMOS
+	g_set_essid_before_scan = FALSE;
+#endif
+
+#if defined(CSCAN)
+	WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+	return -EINVAL;
+#endif 
+
+#if defined(SOFTAP)
+	
+	if (ap_cfg_running) {
+		WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		return 0;
+	}
+#endif 
+
+	
+	if (g_onoff == G_WLAN_SET_OFF)
+		return 0;
+
+	
+	memset(&g_specific_ssid, 0, sizeof(g_specific_ssid));
+#ifndef WL_IW_USE_ISCAN
+	
+	g_scan_specified_ssid = 0;
+#endif 
+
+#if WIRELESS_EXT > 17
+	
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+#if defined(CONFIG_FIRST_SCAN)
+			if (g_first_broadcast_scan != BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+				
+				WL_TRACE(("%s Ignoring SC %s first BC is not done = %d\n",
+				          __FUNCTION__, req->essid,
+				          g_first_broadcast_scan));
+				return -EBUSY;
+			}
+#endif	
+			if (g_scan_specified_ssid) {
+				WL_TRACE(("%s Specific SCAN is not done ignore scan for = %s \n",
+					__FUNCTION__, req->essid));
+				
+				return -EBUSY;
+			}
+			else {
+				g_specific_ssid.SSID_len = MIN(sizeof(g_specific_ssid.SSID),
+				                               req->essid_len);
+				memcpy(g_specific_ssid.SSID, req->essid, g_specific_ssid.SSID_len);
+				g_specific_ssid.SSID_len = htod32(g_specific_ssid.SSID_len);
+				g_scan_specified_ssid = 1;
+				WL_TRACE(("### Specific scan ssid=%s len=%d\n",
+				          g_specific_ssid.SSID, g_specific_ssid.SSID_len));
+			}
+		}
+	}
+#endif 
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)))) {
+		WL_TRACE(("#### Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error));
+		
+		g_scan_specified_ssid = 0;
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+#ifdef WL_IW_USE_ISCAN
+int
+wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+
+#if defined(CONFIG_FIRST_SCAN)
+	
+	if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_IDLE) {
+		g_first_broadcast_scan = BROADCAST_SCAN_FIRST_STARTED;
+		WL_TRACE(("%s: First Brodcast scan was forced\n", __FUNCTION__));
+	}
+	else if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) {
+		WL_TRACE(("%s: ignore ISCAN request first BS is not done yet\n", __FUNCTION__));
+		return 0;
+	}
+#endif 
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (flag)
+		rtnl_lock();
+#endif
+
+	dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &iscan->scan_flag, sizeof(iscan->scan_flag));
+	wl_iw_set_event_mask(dev);
+
+	WL_TRACE(("+++: Set Broadcast ISCAN\n"));
+	
+	memset(&ssid, 0, sizeof(ssid));
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+
+	memset(&iscan->iscan_ex_params_p->params, 0, iscan->iscan_ex_param_size);
+	wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, &ssid);
+	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (flag)
+		rtnl_unlock();
+#endif
+
+	mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+	iscan->timer_on = 1;
+
+	return 0;
+}
+
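+/* SIOCSIWSCAN handler for the iscan path: starts a broadcast incremental
+ * scan, or falls back to wl_iw_set_scan() for an ESSID-specific request.
+ */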
+static int
+wl_iw_iscan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+	int ret = 0;
+
+	WL_TRACE_SCAN(("%s: SIOCSIWSCAN : ISCAN\n", dev->name));
+
+#if defined(CSCAN)
+	WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__));
+	return -EINVAL;
+#endif 
+
+	net_os_wake_lock(dev);
+
+	
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		goto set_scan_end;
+	}
+#endif
+	
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		goto set_scan_end;
+	}
+
+#ifdef PNO_SUPPORT
+	
+	if  (dhd_dev_get_pno_status(dev)) {
+		WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__));
+	}
+#endif 
+
+	
+	if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) {
+		WL_ERROR(("%s error \n",  __FUNCTION__));
+		goto set_scan_end;
+	}
+
+	if (g_scan_specified_ssid) {
+		WL_TRACE(("%s Specific SCAN already running ignoring BC scan\n",
+		          __FUNCTION__));
+		ret = -EBUSY;
+		goto set_scan_end;
+	}
+
+	
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			int as = 0;
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+			dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as));
+			wl_iw_set_event_mask(dev);
+			ret = wl_iw_set_scan(dev, info, wrqu, extra);
+			goto set_scan_end;
+		}
+		else {
+			g_scan_specified_ssid = 0;
+
+			if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+				WL_TRACE(("%s ISCAN already in progress \n", __FUNCTION__));
+				goto set_scan_end;
+			}
+		}
+	}
+#endif 
+
+#if defined(CONFIG_FIRST_SCAN) && !defined(CSCAN)
+	if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+		if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+
+			WL_ERROR(("%s Clean up First scan flag which is %d\n",
+			          __FUNCTION__, g_first_broadcast_scan));
+			g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+		}
+		else {
+			WL_ERROR(("%s Ignoring Broadcast Scan:First Scan is not done yet %d\n",
+			          __FUNCTION__, g_first_counter_scans));
+			ret = -EBUSY;
+			goto set_scan_end;
+		}
+	}
+#endif
+
+	wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
+
+set_scan_end:
+	net_os_wake_unlock(dev);
+	return ret;
+}
+#endif 
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+
+
+	uint8 *ie = *wpaie;
+
+	
+	if ((ie[1] >= 6) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+		return TRUE;
+	}
+
+	
+	ie += ie[1] + 2;
+	
+	*tlvs_len -= (int)(ie - *tlvs);
+	
+	*tlvs = ie;
+	return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+
+
+	uint8 *ie = *wpsie;
+
+	
+	if ((ie[1] >= 4) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+		return TRUE;
+	}
+
+	
+	ie += ie[1] + 2;
+	
+	*tlvs_len -= (int)(ie - *tlvs);
+	
+	*tlvs = ie;
+	return FALSE;
+}
+#endif 
+
+
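+/* Translate the RSN, WPS and WPA information elements of a bss_info into
+ * IWEVGENIE events appended to the scan-result stream.
+ */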
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+	struct iw_event	iwe;
+	char *event;
+
+	event = *event_p;
+	if (bi->ie_length) {
+		
+		bcm_tlv_t *ie;
+		uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		int ptr_len = bi->ie_length;
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			
+			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		*event_p = event;
+	}
+#endif 
+
+	return 0;
+}
+
+#ifndef CSCAN
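+/* Format one wl_scan_results_t list as WEXT events into the caller's buffer;
+ * returns the number of bytes written.
+ */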
+static uint
+wl_iw_get_scan_prep(
+	wl_scan_results_t *list,
+	struct iw_request_info *info,
+	char *extra,
+	short max_size)
+{
+	int  i, j;
+	struct iw_event  iwe;
+	wl_bss_info_t *bi = NULL;
+	char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
+	int	ret = 0;
+
+	if (!list) {
+		WL_ERROR(("%s: Null list pointer", __FUNCTION__));
+		return ret;
+	}
+
+	
+
+	for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+		if (list->version != WL_BSS_INFO_VERSION) {
+			WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+			          __FUNCTION__, list->version));
+			return ret;
+		}
+
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+
+		WL_TRACE(("%s : %s\n", __FUNCTION__, bi->SSID));
+
+		
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+		
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		
+		iwe.cmd = SIOCGIWFREQ;
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		
+		 wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		
+		if (bi->rateset.count) {
+			if ((event + IW_EV_LCP_LEN) <= end) {
+				value = event + IW_EV_LCP_LEN;
+				iwe.cmd = SIOCGIWRATE;
+				
+				iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+				for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+					iwe.u.bitrate.value =
+						(bi->rateset.rates[j] & 0x7f) * 500000;
+					value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+						IW_EV_PARAM_LEN);
+				}
+				event = value;
+			}
+		}
+	}
+
+	if ((ret = (event - extra)) < 0) {
+		WL_ERROR(("==> Wrong size\n"));
+		ret = 0;
+	}
+
+	WL_TRACE(("%s: size=%d bytes prepared \n", __FUNCTION__, (unsigned int)(event - extra)));
+	return (uint)ret;
+}
+
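+/* SIOCGIWSCAN handler: fetches WLC_SCAN_RESULTS, merges them with any iscan
+ * buffers and the specific-scan cache, and formats everything as WEXT events.
+ */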
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	wl_scan_results_t *list_merge;
+	wl_scan_results_t *list = (wl_scan_results_t *) g_scan;
+	int error;
+	uint buflen_from_user = dwrq->length;
+	uint len =  G_SCAN_RESULTS;
+	__u16 len_ret = 0;
+#if  !defined(CSCAN)
+	__u16 merged_len = 0;
+#endif
+#if defined(WL_IW_USE_ISCAN)
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * p_buf;
+#if  !defined(CSCAN)
+	uint32 counter = 0;
+#endif 
+#endif 
+
+	WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user));
+
+	if (!extra) {
+		WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name));
+		return -EINVAL;
+	}
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+	ci.scan_channel = dtoh32(ci.scan_channel);
+	if (ci.scan_channel)
+		return -EAGAIN;
+
+#if  !defined(CSCAN)
+	if (g_ss_cache_ctrl.m_timer_expired) {
+		wl_iw_free_ss_cache();
+		g_ss_cache_ctrl.m_timer_expired ^= 1;
+	}
+	if ((!g_scan_specified_ssid && g_ss_cache_ctrl.m_prev_scan_mode) ||
+		g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+		g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+		
+		wl_iw_reset_ss_cache();
+	}
+	g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+	if (g_scan_specified_ssid) {
+		g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+	}
+	else {
+		g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+	}
+#endif 
+
+
+	
+	if (g_scan_specified_ssid) {
+		
+		list = kmalloc(len, GFP_KERNEL);
+		if (!list) {
+			WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n", dev->name));
+			g_scan_specified_ssid = 0;
+			return -ENOMEM;
+		}
+	}
+
+	memset(list, 0, len);
+	list->buflen = htod32(len);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len))) {
+		WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name, __FUNCTION__, error));
+		dwrq->length = len;
+		if (g_scan_specified_ssid) {
+			g_scan_specified_ssid = 0;
+			kfree(list);
+		}
+		return 0;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	
+	if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+		          __FUNCTION__, list->version));
+		if (g_scan_specified_ssid) {
+			g_scan_specified_ssid = 0;
+			kfree(list);
+		}
+		return -EINVAL;
+	}
+
+#if  !defined(CSCAN)
+	if (g_scan_specified_ssid) {
+		
+		wl_iw_add_bss_to_ss_cache(list);
+		kfree(list);
+	}
+#endif
+
+#if  !defined(CSCAN)
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+#if defined(WL_IW_USE_ISCAN)
+	if (g_scan_specified_ssid)
+		WL_TRACE(("%s: Specified scan APs from scan=%d\n", __FUNCTION__, list->count));
+	p_buf = iscan->list_hdr;
+	
+	while (p_buf != iscan->list_cur) {
+		list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+		WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+		counter += list_merge->count;
+		if (list_merge->count > 0)
+			len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+			    extra+len_ret, buflen_from_user -len_ret);
+		p_buf = p_buf->next;
+	}
+	WL_TRACE(("%s merged with total Bcast APs=%d\n", __FUNCTION__, counter));
+#else
+	list_merge = (wl_scan_results_t *) g_scan;
+	len_ret = (__u16) wl_iw_get_scan_prep(list_merge, info, extra, buflen_from_user);
+#endif 
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+	if (g_ss_cache_ctrl.m_link_down) {
+		
+		wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+	}
+	
+	wl_iw_merge_scan_cache(info, extra+len_ret, buflen_from_user-len_ret, &merged_len);
+	len_ret += merged_len;
+	wl_iw_run_ss_cache_timer(0);
+	wl_iw_run_ss_cache_timer(1);
+#else	
+
+	
+	if (g_scan_specified_ssid) {
+		WL_TRACE(("%s: Specified scan APs in the list =%d\n", __FUNCTION__, list->count));
+		len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+		kfree(list);
+
+#if defined(WL_IW_USE_ISCAN)
+		p_buf = iscan->list_hdr;
+		
+		while (p_buf != iscan->list_cur) {
+			list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+			WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+			if (list_merge->count > 0)
+				len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info,
+				    extra+len_ret, buflen_from_user -len_ret);
+			p_buf = p_buf->next;
+		}
+#else
+		list_merge = (wl_scan_results_t *) g_scan;
+		WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count));
+		if (list_merge->count > 0)
+			len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, extra+len_ret,
+				buflen_from_user -len_ret);
+#endif 
+	}
+	else {
+		list = (wl_scan_results_t *) g_scan;
+		len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user);
+	}
+#endif	
+
+#if defined(WL_IW_USE_ISCAN)
+	
+	g_scan_specified_ssid = 0;
+#endif 
+	
+	if ((len_ret + WE_ADD_EVENT_FIX) < buflen_from_user)
+		len = len_ret;
+
+	dwrq->length = len;
+	dwrq->flags = 0;	
+
+	WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, list->count));
+	return 0;
+}
+#endif 
+
+#if defined(WL_IW_USE_ISCAN)
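+/* SIOCGIWSCAN handler for the iscan path: walks the chained iscan buffers
+ * and emits one group of WEXT events per BSS found.
+ */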
+static int
+wl_iw_iscan_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int ii, j;
+	int apcnt;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * p_buf;
+	uint32  counter = 0;
+	uint8   channel;
+#if !defined(CSCAN)
+	__u16 merged_len = 0;
+	uint buflen_from_user = dwrq->length;
+#endif
+
+	WL_TRACE(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length));
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		return -EINVAL;
+	}
+#endif
+
+	if (!extra) {
+		WL_TRACE(("%s: INVALID SIOCGIWSCAN GET bad parameter\n", dev->name));
+		return -EINVAL;
+	}
+
+#if defined(CONFIG_FIRST_SCAN)
+	if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_READY) {
+		WL_TRACE(("%s %s: first ISCAN results are NOT ready yet \n",
+		          dev->name, __FUNCTION__));
+		return -EAGAIN;
+	}
+#endif	
+	
+	if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) {
+		WL_ERROR(("%ssysioc_pid\n", __FUNCTION__));
+		return EAGAIN;
+	}
+
+	
+
+#if !defined(CSCAN)
+	if (g_ss_cache_ctrl.m_timer_expired) {
+		wl_iw_free_ss_cache();
+		g_ss_cache_ctrl.m_timer_expired ^= 1;
+	}
+	if (g_scan_specified_ssid) {
+		return wl_iw_get_scan(dev, info, dwrq, extra);
+	}
+	else {
+		if (g_ss_cache_ctrl.m_link_down) {
+			
+			wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid);
+		}
+		if (g_ss_cache_ctrl.m_prev_scan_mode || g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) {
+			g_ss_cache_ctrl.m_cons_br_scan_cnt = 0;
+			
+			wl_iw_reset_ss_cache();
+		}
+		g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid;
+		g_ss_cache_ctrl.m_cons_br_scan_cnt++;
+	}
+#endif 
+
+	WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name));
+	apcnt = 0;
+	p_buf = iscan->list_hdr;
+	
+	while (p_buf != iscan->list_cur) {
+		list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+		counter += list->count;
+
+		if (list->version != WL_BSS_INFO_VERSION) {
+			WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+			          __FUNCTION__, list->version));
+			return -EINVAL;
+		}
+
+		bi = NULL;
+		for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+			bi = (bi ?
+			      (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) :
+			      list->bss_info);
+			ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			                                              WLC_IW_ISCAN_MAXLEN));
+
+			
+			if (event + ETHER_ADDR_LEN + bi->SSID_len +
+			    IW_EV_UINT_LEN + IW_EV_FREQ_LEN + IW_EV_QUAL_LEN >= end)
+				return -E2BIG;
+			
+			iwe.cmd = SIOCGIWAP;
+			iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+			memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+			
+			iwe.u.data.length = dtoh32(bi->SSID_len);
+			iwe.cmd = SIOCGIWESSID;
+			iwe.u.data.flags = 1;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+			
+			if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+				iwe.cmd = SIOCGIWMODE;
+				if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+					iwe.u.mode = IW_MODE_INFRA;
+				else
+					iwe.u.mode = IW_MODE_ADHOC;
+				event = IWE_STREAM_ADD_EVENT(info, event, end,
+				                             &iwe, IW_EV_UINT_LEN);
+			}
+
+			
+			iwe.cmd = SIOCGIWFREQ;
+			channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+			iwe.u.freq.m = wf_channel2mhz(channel,
+			                              channel <= CH_MAX_2G_CHANNEL ?
+			                              WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+			iwe.u.freq.e = 6;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+			
+			iwe.cmd = IWEVQUAL;
+			iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+			iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+			iwe.u.qual.noise = 0x100 + bi->phy_noise;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+			
+			wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+			
+			iwe.cmd = SIOCGIWENCODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+				iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+			else
+				iwe.u.data.flags = IW_ENCODE_DISABLED;
+			iwe.u.data.length = 0;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+			
+			if (bi->rateset.count) {
+				if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+					return -E2BIG;
+
+				value = event + IW_EV_LCP_LEN;
+				iwe.cmd = SIOCGIWRATE;
+				
+				iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+				for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+					iwe.u.bitrate.value =
+					        (bi->rateset.rates[j] & 0x7f) * 500000;
+					value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					                             IW_EV_PARAM_LEN);
+				}
+				event = value;
+			}
+		}
+		p_buf = p_buf->next;
+	} 
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	
+
+#if !defined(CSCAN)
+	
+	wl_iw_merge_scan_cache(info, event, buflen_from_user - dwrq->length, &merged_len);
+	dwrq->length += merged_len;
+	wl_iw_run_ss_cache_timer(0);
+	wl_iw_run_ss_cache_timer(1);
+#endif 
+	
+#if defined(CONFIG_FIRST_SCAN)
+	g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+#endif 
+
+	WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, counter));
+
+	return 0;
+}
+#endif 
+
+#define WL_JOIN_PARAMS_MAX 1600
+#ifdef CONFIG_PRESCANNED
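+/* Build the join chanspec list from channels already seen for this SSID in
+ * the iscan buffers or the specific-scan cache; returns the number found.
+ */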
+static int
+check_prescan(wl_join_params_t *join_params, int *join_params_size)
+{
+	int cnt = 0;
+	int indx = 0;
+	wl_iw_ss_cache_t *node = NULL;
+	wl_bss_info_t *bi = NULL;
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * buf;
+	wl_scan_results_t *list;
+	char *destbuf;
+
+	buf = iscan->list_hdr;
+
+	while (buf) {
+		list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+		bi = NULL;
+		for (indx = 0;  indx < list->count; indx++) {
+			bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+				: list->bss_info;
+			if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+				continue;
+			if ((dtoh32(bi->SSID_len) != join_params->ssid.SSID_len) ||
+				memcmp(bi->SSID, join_params->ssid.SSID,
+				join_params->ssid.SSID_len))
+				continue;
+			memcpy(&join_params->params.chanspec_list[cnt],
+				&bi->chanspec, sizeof(chanspec_t));
+			WL_ERROR(("iscan : chanspec :%d, count %d \n", bi->chanspec, cnt));
+			cnt++;
+		}
+		buf = buf->next;
+	}
+
+	if (!cnt) {
+		MUTEX_LOCK_WL_SCAN_SET();
+		node = g_ss_cache_ctrl.m_cache_head;
+		for (; node; ) {
+			if (!memcmp(&node->bss_info->SSID, join_params->ssid.SSID,
+				join_params->ssid.SSID_len)) {
+				memcpy(&join_params->params.chanspec_list[cnt],
+					&node->bss_info->chanspec, sizeof(chanspec_t));
+				WL_ERROR(("cache_scan : chanspec :%d, count %d \n",
+				(int)node->bss_info->chanspec, cnt));
+				cnt++;
+			}
+			node = node->next;
+		}
+		MUTEX_UNLOCK_WL_SCAN_SET();
+	}
+
+	if (!cnt) {
+		return 0;
+	}
+
+	destbuf = (char *)&join_params->params.chanspec_list[cnt];
+	*join_params_size = destbuf - (char*)join_params;
+	join_params->ssid.SSID_len = htod32(g_ssid.SSID_len);
+	memcpy(&(join_params->params.bssid), &ether_bcast, ETHER_ADDR_LEN);
+	join_params->params.chanspec_num = htod32(cnt);
+
+	if ((*join_params_size) > WL_JOIN_PARAMS_MAX) {
+		WL_ERROR(("can't fit bssids for all %d APs found\n", cnt));
+		return 0;
+	}
+
+	WL_ERROR(("Passing %d channel/bssid pairs.\n", cnt));
+	return cnt;
+}
+#endif 
+
+static int
+wl_iw_set_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	int error;
+	wl_join_params_t *join_params;
+	int join_params_size;
+
+	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+	RETURN_IF_EXTRA_NULL(extra);
+
+#ifdef OEM_CHROMIUMOS
+	if (g_set_essid_before_scan)
+		return -EAGAIN;
+#endif
+	if (!(join_params = kmalloc(WL_JOIN_PARAMS_MAX, GFP_KERNEL))) {
+		WL_ERROR(("allocation failed for join_params size is %d\n", WL_JOIN_PARAMS_MAX));
+		return -ENOMEM;
+	}
+
+	memset(join_params, 0, WL_JOIN_PARAMS_MAX);
+
+	
+	memset(&g_ssid, 0, sizeof(g_ssid));
+
+	if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+		g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length);
+#else
+		g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length-1);
+#endif
+		memcpy(g_ssid.SSID, extra, g_ssid.SSID_len);
+
+#ifdef CONFIG_PRESCANNED
+		memcpy(join_params->ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+		join_params->ssid.SSID_len = g_ssid.SSID_len;
+
+		if (check_prescan(join_params, &join_params_size)) {
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID,
+				join_params, join_params_size))) {
+				WL_ERROR(("Invalid ioctl data=%d\n", error));
+				kfree(join_params);
+				return error;
+			}
+			kfree(join_params);
+			return 0;
+		} else {
+			WL_ERROR(("No matched found\n Trying to join to specific channel\n"));
+		}
+#endif 
+	} else {
+		
+		g_ssid.SSID_len = 0;
+	}
+	g_ssid.SSID_len = htod32(g_ssid.SSID_len);
+
+	
+	memset(join_params, 0, WL_JOIN_PARAMS_MAX);
+	join_params_size = sizeof(join_params->ssid);
+
+	memcpy(join_params->ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+	join_params->ssid.SSID_len = htod32(g_ssid.SSID_len);
+	memcpy(&(join_params->params.bssid), &ether_bcast, ETHER_ADDR_LEN);
+
+	
+	
+	wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, join_params, &join_params_size);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, join_params, join_params_size))) {
+		WL_ERROR(("Invalid ioctl data=%d\n", error));
+		kfree(join_params);
+		return error;
+	}
+
+	if (g_ssid.SSID_len) {
+		WL_ERROR(("%s: join SSID=%s ch=%d\n", __FUNCTION__,
+			g_ssid.SSID,  g_wl_iw_params.target_channel));
+	}
+	kfree(join_params);
+	return 0;
+}
+
+static int
+wl_iw_get_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+		WL_ERROR(("Error getting the SSID\n"));
+		return error;
+	}
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	
+	memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+	dwrq->length = ssid.SSID_len;
+
+	dwrq->flags = 1; 
+
+	return 0;
+}
+
+static int
+wl_iw_set_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+
+	WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	
+	if (dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+	memcpy(iw->nickname, extra, dwrq->length);
+	iw->nickname[dwrq->length - 1] = '\0';
+
+	return 0;
+}
+
+static int
+wl_iw_get_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+
+	WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	strcpy(extra, iw->nickname);
+	dwrq->length = strlen(extra) + 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	wl_rateset_t rateset;
+	int error, rate, i, error_bg, error_a;
+
+	WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+
+	rateset.count = dtoh32(rateset.count);
+
+	if (vwrq->value < 0) {
+		
+		rate = rateset.rates[rateset.count - 1] & 0x7f;
+	} else if (vwrq->value < rateset.count) {
+		
+		rate = rateset.rates[vwrq->value] & 0x7f;
+	} else {
+		
+		rate = vwrq->value / 500000;
+	}
+
+	if (vwrq->fixed) {
+		
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+	} else {
+		
+		
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+		
+		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+
+		
+		for (i = 0; i < rateset.count; i++)
+			if ((rateset.rates[i] & 0x7f) > rate)
+				break;
+		rateset.count = htod32(i);
+
+		
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+			return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rate;
+
+	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+		return error;
+	rate = dtoh32(rate);
+	vwrq->value = rate * 500000;
+
+	return 0;
+}
+
+static int
+wl_iw_set_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+	if (vwrq->disabled)
+		rts = DOT11_DEFAULT_RTS_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+		return -EINVAL;
+	else
+		rts = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+		return error;
+
+	vwrq->value = rts;
+	vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, frag;
+
+	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+	if (vwrq->disabled)
+		frag = DOT11_DEFAULT_FRAG_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+		return -EINVAL;
+	else
+		frag = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, fragthreshold;
+
+	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+		return error;
+
+	vwrq->value = fragthreshold;
+	vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable;
+	uint16 txpwrmw;
+	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+	
+	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+	disable += WL_RADIO_SW_DISABLE << 16;
+
+	disable = htod32(disable);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+		return error;
+
+	
+	if (disable & WL_RADIO_SW_DISABLE)
+		return 0;
+
+	
+	if (!(vwrq->flags & IW_TXPOW_MWATT))
+		return -EINVAL;
+
+	
+	if (vwrq->value < 0)
+		return 0;
+
+	if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+	else txpwrmw = (uint16)vwrq->value;
+
+
+	error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+	return error;
+}
+
+static int
+wl_iw_get_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable, txpwrdbm;
+	uint8 result;
+
+	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+	    (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+		return error;
+
+	disable = dtoh32(disable);
+	result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+	vwrq->value = (int32)bcm_qdbm_to_mw(result);
+	vwrq->fixed = 0;
+	vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+	vwrq->flags = IW_TXPOW_MWATT;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+	
+	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+		return -EINVAL;
+
+	
+	if (vwrq->flags & IW_RETRY_LIMIT) {
+
+		
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX) ||
+		    !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif 
+			lrl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+				return error;
+		}
+
+		
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN) ||
+		    !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif 
+			srl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+				return error;
+		}
+	}
+	return 0;
+}
+
+static int
+wl_iw_get_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+	vwrq->disabled = 0;      
+
+	
+	if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+		return error;
+
+	lrl = dtoh32(lrl);
+	srl = dtoh32(srl);
+
+	
+	if (vwrq->flags & IW_RETRY_MAX) {
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+		vwrq->value = lrl;
+	} else {
+		vwrq->flags = IW_RETRY_LIMIT;
+		vwrq->value = srl;
+		if (srl != lrl)
+			vwrq->flags |= IW_RETRY_MIN;
+	}
+
+	return 0;
+}
+#endif 
+
+static int
+wl_iw_set_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec;
+
+	WL_TRACE(("%s: SIOCSIWENCODE index %d, len %d, flags %04x (%s%s%s%s%s)\n",
+		dev->name, dwrq->flags & IW_ENCODE_INDEX, dwrq->length, dwrq->flags,
+		dwrq->flags & IW_ENCODE_NOKEY ? "NOKEY" : "",
+		dwrq->flags & IW_ENCODE_DISABLED ? " DISABLED" : "",
+		dwrq->flags & IW_ENCODE_RESTRICTED ? " RESTRICTED" : "",
+		dwrq->flags & IW_ENCODE_OPEN ? " OPEN" : "",
+		dwrq->flags & IW_ENCODE_TEMP ? " TEMP" : ""));
+
+	memset(&key, 0, sizeof(key));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = htod32(key.index);
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+		
+		if (key.index == DOT11_MAX_DEFAULT_KEYS)
+			key.index = 0;
+	} else {
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+		if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+			return -EINVAL;
+	}
+
+	
+	if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+		
+		val = htod32(key.index);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+			return error;
+	} else {
+		key.len = dwrq->length;
+
+		if (dwrq->length > sizeof(key.data))
+			return -EINVAL;
+
+		memcpy(key.data, extra, dwrq->length);
+
+		key.flags = WL_PRIMARY_KEY;
+		switch (key.len) {
+		case WEP1_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP1;
+			break;
+		case WEP128_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP128;
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+		case TKIP_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_TKIP;
+			break;
+#endif
+		case AES_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		
+		swap_key_from_BE(&key);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+			return error;
+	}
+
+	
+	val = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+	if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+		return error;
+
+	wsec  &= ~(WEP_ENABLED);
+	wsec |= val;
+
+	if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+		return error;
+
+	
+	val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+	val = htod32(val);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec, auth;
+
+	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+	
+	bzero(&key, sizeof(wl_wsec_key_t));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = key.index;
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+	} else
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+		key.index = 0;
+
+	
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+		return error;
+
+	swap_key_to_BE(&key);
+
+	wsec = dtoh32(wsec);
+	auth = dtoh32(auth);
+	
+	dwrq->length = MIN(DOT11_MAX_KEY_SIZE, key.len);
+
+	
+	dwrq->flags = key.index + 1;
+	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+		
+		dwrq->flags |= IW_ENCODE_DISABLED;
+	}
+	if (auth) {
+		
+		dwrq->flags |= IW_ENCODE_RESTRICTED;
+	}
+
+	
+	if (dwrq->length && extra)
+		memcpy(extra, key.data, dwrq->length);
+
+	return 0;
+}
+
+static int
+wl_iw_set_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+	pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+	pm = htod32(pm);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+		return error;
+
+	pm = dtoh32(pm);
+	vwrq->disabled = pm ? 0 : 1;
+	vwrq->flags = IW_POWER_ALL_R;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+
+	WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+	RETURN_IF_EXTRA_NULL(extra);
+
+#ifdef DHD_DEBUG
+	{
+		int i;
+
+		for (i = 0; i < iwp->length; i++)
+			WL_TRACE(("%02X ", extra[i]));
+		WL_TRACE(("\n"));
+	}
+#endif
+
+	dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+	return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+	iwp->length = 64;
+	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+	return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error;
+	struct iw_encode_ext *iwe;
+
+	WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+	RETURN_IF_EXTRA_NULL(extra);
+
+	memset(&key, 0, sizeof(key));
+	iwe = (struct iw_encode_ext *)extra;
+
+	
+	if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+	}
+
+	
+	key.index = 0;
+	if (dwrq->flags & IW_ENCODE_INDEX)
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	key.len = iwe->key_len;
+
+	
+	if (!ETHER_ISMULTI(iwe->addr.sa_data))
+		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+	
+	if (key.len == 0) {
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the the primary Key to %d\n", key.index));
+			
+			key.index = htod32(key.index);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+				&key.index, sizeof(key.index));
+			if (error)
+				return error;
+		}
+		
+		else {
+			swap_key_from_BE(&key);
+			dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		}
+	}
+	else {
+		if (iwe->key_len > sizeof(key.data))
+			return -EINVAL;
+
+		WL_WSEC(("Setting the key index %d\n", key.index));
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("key is a Primary Key\n"));
+			key.flags = WL_PRIMARY_KEY;
+		}
+
+		bcopy((void *)iwe->key, key.data, iwe->key_len);
+
+		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+			uint8 keybuf[8];
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+
+		
+		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+			uchar *ivptr;
+			ivptr = (uchar *)iwe->rx_seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = TRUE;
+		}
+
+		switch (iwe->alg) {
+			case IW_ENCODE_ALG_NONE:
+				key.algo = CRYPTO_ALGO_OFF;
+				break;
+			case IW_ENCODE_ALG_WEP:
+				if (iwe->key_len == WEP1_KEY_SIZE)
+					key.algo = CRYPTO_ALGO_WEP1;
+				else
+					key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			case IW_ENCODE_ALG_TKIP:
+				key.algo = CRYPTO_ALGO_TKIP;
+				break;
+			case IW_ENCODE_ALG_CCMP:
+				key.algo = CRYPTO_ALGO_AES_CCM;
+				break;
+			default:
+				break;
+		}
+		swap_key_from_BE(&key);
+
+		dhd_wait_pend8021x(dev);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		if (error)
+			return error;
+	}
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+struct {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+
+static int
+wl_iw_set_pmksa(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	struct iw_pmksa *iwpmksa;
+	uint i;
+	int ret = 0;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name));
+
+	RETURN_IF_EXTRA_NULL(extra);
+
+	iwpmksa = (struct iw_pmksa *)extra;
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+
+	if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+		WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+		bzero((char *)&pmkid_list, sizeof(pmkid_list));
+	}
+
+	else if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+		{
+			pmkid_list_t pmkid, *pmkidptr;
+			uint j;
+			pmkidptr = &pmkid;
+
+			bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID,
+				ETHER_ADDR_LEN);
+			bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+
+			WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+			WL_WSEC(("\n"));
+		}
+
+		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+
+		if ((pmkid_list.pmkids.npmkid > 0) && (i < pmkid_list.pmkids.npmkid)) {
+			bzero(&pmkid_list.pmkids.pmkid[i], sizeof(pmkid_t));
+			for (; i < (pmkid_list.pmkids.npmkid - 1); i++) {
+				bcopy(&pmkid_list.pmkids.pmkid[i+1].BSSID,
+					&pmkid_list.pmkids.pmkid[i].BSSID,
+					ETHER_ADDR_LEN);
+				bcopy(&pmkid_list.pmkids.pmkid[i+1].PMKID,
+					&pmkid_list.pmkids.pmkid[i].PMKID,
+					WPA2_PMKID_LEN);
+			}
+			pmkid_list.pmkids.npmkid--;
+		}
+		else
+			ret = -EINVAL;
+	}
+
+	else if (iwpmksa->cmd == IW_PMKSA_ADD) {
+		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_list.pmkids.pmkid[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+		if (i < MAXPMKID) {
+			bcopy(&iwpmksa->bssid.sa_data[0],
+				&pmkid_list.pmkids.pmkid[i].BSSID,
+				ETHER_ADDR_LEN);
+			bcopy(&iwpmksa->pmkid[0], &pmkid_list.pmkids.pmkid[i].PMKID,
+				WPA2_PMKID_LEN);
+			if (i == pmkid_list.pmkids.npmkid)
+				pmkid_list.pmkids.npmkid++;
+		}
+		else
+			ret = -EINVAL;
+
+		if (!ret) {
+			uint j;
+			WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[i].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]));
+			WL_WSEC(("\n"));
+		}
+	}
+	WL_WSEC(("PRINTING pmkid LIST - No of elements %d", pmkid_list.pmkids.npmkid));
+	for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+		uint j;
+		WL_WSEC(("\nPMKID[%d]: %s = ", i,
+			bcm_ether_ntoa(&pmkid_list.pmkids.pmkid[i].BSSID,
+			eabuf)));
+		for (j = 0; j < WPA2_PMKID_LEN; j++)
+			WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]));
+	}
+	WL_WSEC(("\n"));
+
+	if (!ret)
+		ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list,
+			sizeof(pmkid_list));
+	return ret;
+}
+#endif 
+
+static int
+wl_iw_get_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+	return 0;
+}
+
+
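+/* Derive the driver "wsec" bitmask (WEP/TKIP/AES) from the pairwise and
+ * group ciphers configured via SIOCSIWAUTH.
+ */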
+static uint32
+wl_iw_create_wpaauth_wsec(struct net_device *dev)
+{
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+	uint32 wsec;
+
+	
+	if (iw->pcipher & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+		wsec = WEP_ENABLED;
+	else if (iw->pcipher & IW_AUTH_CIPHER_TKIP)
+		wsec = TKIP_ENABLED;
+	else if (iw->pcipher & IW_AUTH_CIPHER_CCMP)
+		wsec = AES_ENABLED;
+	else
+		wsec = 0;
+
+	
+	if (iw->gcipher & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+		wsec |= WEP_ENABLED;
+	else if (iw->gcipher & IW_AUTH_CIPHER_TKIP)
+		wsec |= TKIP_ENABLED;
+	else if (iw->gcipher & IW_AUTH_CIPHER_CCMP)
+		wsec |= AES_ENABLED;
+
+	
+	if (wsec == 0 && iw->privacy_invoked)
+		wsec = WEP_ENABLED;
+
+	WL_INFORM(("%s: returning wsec of %d\n", __FUNCTION__, wsec));
+
+	return wsec;
+}
+
+static int
+wl_iw_set_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error = 0;
+	int paramid;
+	int paramval;
+	int val = 0;
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+	paramval = vwrq->value;
+
+	WL_TRACE(("%s: SIOCSIWAUTH, %s(%d), paramval = 0x%0x\n",
+		dev->name,
+		paramid == IW_AUTH_WPA_VERSION ? "IW_AUTH_WPA_VERSION" :
+		paramid == IW_AUTH_CIPHER_PAIRWISE ? "IW_AUTH_CIPHER_PAIRWISE" :
+		paramid == IW_AUTH_CIPHER_GROUP ? "IW_AUTH_CIPHER_GROUP" :
+		paramid == IW_AUTH_KEY_MGMT ? "IW_AUTH_KEY_MGMT" :
+		paramid == IW_AUTH_TKIP_COUNTERMEASURES ? "IW_AUTH_TKIP_COUNTERMEASURES" :
+		paramid == IW_AUTH_DROP_UNENCRYPTED ? "IW_AUTH_DROP_UNENCRYPTED" :
+		paramid == IW_AUTH_80211_AUTH_ALG ? "IW_AUTH_80211_AUTH_ALG" :
+		paramid == IW_AUTH_WPA_ENABLED ? "IW_AUTH_WPA_ENABLED" :
+		paramid == IW_AUTH_RX_UNENCRYPTED_EAPOL ? "IW_AUTH_RX_UNENCRYPTED_EAPOL" :
+		paramid == IW_AUTH_ROAMING_CONTROL ? "IW_AUTH_ROAMING_CONTROL" :
+		paramid == IW_AUTH_PRIVACY_INVOKED ? "IW_AUTH_PRIVACY_INVOKED" :
+		"UNKNOWN",
+		paramid, paramval));
+
+#if defined(SOFTAP)
+	if (ap_cfg_running) {
+		WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__));
+		return 0;
+	}
+#endif
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		
+		iw->wpaversion = paramval;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		iw->pcipher = paramval;
+		val = wl_iw_create_wpaauth_wsec(dev);
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+			return error;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		iw->gcipher = paramval;
+		val = wl_iw_create_wpaauth_wsec(dev);
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+			return error;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		if (paramval & IW_AUTH_KEY_MGMT_PSK) {
+			if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA)
+				val = WPA_AUTH_PSK;
+			else if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA2)
+				val = WPA2_AUTH_PSK;
+			else 
+				val = WPA_AUTH_DISABLED;
+		} else if (paramval & IW_AUTH_KEY_MGMT_802_1X) {
+			if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA)
+				val = WPA_AUTH_UNSPECIFIED;
+			else if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA2)
+				val = WPA2_AUTH_UNSPECIFIED;
+			else 
+				val = WPA_AUTH_DISABLED;
+		}
+		else
+			val = WPA_AUTH_DISABLED;
+
+		WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		
+		WL_INFORM(("Setting the D11auth %d\n", paramval));
+		if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
+			val = 0;
+		else if (paramval == IW_AUTH_ALG_SHARED_KEY)
+			val = 1;
+		else if (paramval == (IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY))
+			val = 2;
+		else
+			error = 1;
+		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (paramval == 0) {
+			iw->privacy_invoked = 0; 
+			iw->pcipher = 0;
+			iw->gcipher = 0;
+			val = wl_iw_create_wpaauth_wsec(dev);
+			if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+				return error;
+			WL_INFORM(("%s: %d: setting wpa_auth to %d, wsec to %d\n",
+				__FUNCTION__, __LINE__, paramval, val));
+			dev_wlc_intvar_set(dev, "wpa_auth", paramval);
+			return error;
+		}
+
+		
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		if ((error = dev_wlc_intvar_set(dev, "wsec_restrict", paramval)))
+			return error;
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+#if WIRELESS_EXT > 17
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		iw->privacy_invoked = paramval;
+		val = wl_iw_create_wpaauth_wsec(dev);
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+			return error;
+		break;
+
+#endif 
+	default:
+		break;
+	}
+	return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		paramval = iw->wpaversion;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		paramval = iw->pcipher;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		paramval = iw->gcipher;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_intvar_get(dev, "wsec_restrict", &paramval);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+#if WIRELESS_EXT > 17
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+
+#endif 
+	}
+	vwrq->value = paramval;
+	return 0;
+}
+#endif 
+
+
+#ifdef SOFTAP
+
+static int ap_macmode = MACLIST_MODE_DISABLED;
+static struct mflist ap_black_list;
+
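+/*
+ * Parse a key string into a wl_wsec_key_t: 5/13/16-byte ASCII keys are
+ * copied as-is, 10/26/32/64-digit (optionally "0x"-prefixed) strings are
+ * converted from hex, and the cipher algorithm is chosen from the key length.
+ */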
+static int
+wl_iw_parse_wep(char *keystr, wl_wsec_key_t *key)
+{
+	char hex[] = "XX";
+	unsigned char *data = key->data;
+
+	switch (strlen(keystr)) {
+	case 5:
+	case 13:
+	case 16:
+		key->len = strlen(keystr);
+		memcpy(data, keystr, key->len + 1);
+		break;
+	case 12:
+	case 28:
+	case 34:
+	case 66:
+		
+		if (!strnicmp(keystr, "0x", 2))
+			keystr += 2;
+		else
+			return -1;
+		
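+		/* fall through: the remaining characters are parsed as hex below */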
+	case 10:
+	case 26:
+	case 32:
+	case 64:
+		key->len = strlen(keystr) / 2;
+		while (*keystr) {
+			strncpy(hex, keystr, 2);
+			*data++ = (char) bcm_strtoul(hex, NULL, 16);
+			keystr += 2;
+		}
+		break;
+	default:
+		return -1;
+	}
+
+	switch (key->len) {
+	case 5:
+		key->algo = CRYPTO_ALGO_WEP1;
+		break;
+	case 13:
+		key->algo = CRYPTO_ALGO_WEP128;
+		break;
+	case 16:
+		
+		key->algo = CRYPTO_ALGO_AES_CCM;
+		break;
+	case 32:
+		key->algo = CRYPTO_ALGO_TKIP;
+		break;
+	default:
+		return -1;
+	}
+
+	
+	key->flags |= WL_PRIMARY_KEY;
+
+	return 0;
+}
+
+#ifdef EXT_WPA_CRYPTO
+#define SHA1HashSize 20
+extern void pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+                        int iterations, u8 *buf, size_t buflen);
+
+#else
+
+#define SHA1HashSize 20
+static int
+pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len,
+            int iterations, u8 *buf, size_t buflen)
+{
+	WL_ERROR(("WARNING: %s is not implemented !!!\n", __FUNCTION__));
+	return -1;
+}
+
+#endif 
+
+
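+/*
+ * Write the "bss" iovar for bsscfg index 1; callers pass val = 0/1/2 to bring
+ * the SoftAP BSS down, up, or delete it.
+ */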
+static int
+dev_iw_write_cfg1_bss_var(struct net_device *dev, int val)
+{
+	struct {
+		int cfg;
+		int val;
+	} bss_setbuf;
+
+	int bss_set_res;
+	char smbuf[WLC_IOCTL_SMLEN];
+	memset(smbuf, 0, sizeof(smbuf));
+
+	bss_setbuf.cfg = 1;
+	bss_setbuf.val = val;
+
+	bss_set_res = dev_iw_iovar_setbuf(dev, "bss",
+		&bss_setbuf, sizeof(bss_setbuf), smbuf, sizeof(smbuf));
+	WL_TRACE(("%s: bss_set_result:%d set with %d\n", __FUNCTION__, bss_set_res, val));
+
+	return bss_set_res;
+}
+
+
+
+#ifndef AP_ONLY
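+/*
+ * Build a "bsscfg:<iovar>\0<bssidx><param>" buffer for a per-BSS iovar and
+ * return its total length (0 and BCME_BUFTOOSHORT if the buffer is too small).
+ */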
+static int
+wl_bssiovar_mkbuf(
+		const char *iovar,
+		int bssidx,
+		void *param,
+		int paramlen,
+		void *bufptr,
+		int buflen,
+		int *perr)
+{
+	const char *prefix = "bsscfg:";
+	int8* p;
+	uint prefixlen;
+	uint namelen;
+	uint iolen;
+
+	prefixlen = strlen(prefix);	
+	namelen = strlen(iovar) + 1;	
+	iolen = prefixlen + namelen + sizeof(int) + paramlen;
+
+	
+	if (buflen < 0 || iolen > (uint)buflen) {
+		*perr = BCME_BUFTOOSHORT;
+		return 0;
+	}
+
+	p = (int8*)bufptr;
+
+	
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	
+	memcpy(p, iovar, namelen);
+	p += namelen;
+
+	
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(int32));
+	p += sizeof(int32);
+
+	
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	*perr = 0;
+	return iolen;
+}
+#endif 
+
+
+
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+
+#if defined(CSCAN)
+
+
+
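+/*
+ * Build the extended iscan request: SSIDs are appended after the channel
+ * list, both counts are packed into channel_num, and the request is issued
+ * through the "iscan" iovar.
+ */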
+static int
+wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, int nchan)
+{
+	int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
+	int err = 0;
+	char *p;
+	int i;
+	iscan_info_t *iscan = g_iscan;
+
+	WL_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan));
+
+	if (!dev || !iscan || !iscan->iscan_ex_params_p) {
+		WL_ERROR(("%s error exit\n", __FUNCTION__));
+		err = -1;
+		goto exit;
+	}
+
+#ifdef PNO_SUPPORT
+	
+	if  (dhd_dev_get_pno_status(dev)) {
+		WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__));
+	}
+#endif 
+
+	params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+
+	
+	if (nssid > 0) {
+		i = OFFSETOF(wl_scan_params_t, channel_list) + nchan * sizeof(uint16);
+		i = ROUNDUP(i, sizeof(uint32));
+		if (i + nssid * sizeof(wlc_ssid_t) > params_size) {
+			printf("additional ssids exceed params_size\n");
+			err = -1;
+			goto exit;
+		}
+
+		p = ((char*)&iscan->iscan_ex_params_p->params) + i;
+		memcpy(p, ssids_local, nssid * sizeof(wlc_ssid_t));
+		p += nssid * sizeof(wlc_ssid_t);
+	} else {
+		p = (char*)iscan->iscan_ex_params_p->params.channel_list + nchan * sizeof(uint16);
+	}
+
+	
+	iscan->iscan_ex_params_p->params.channel_num =
+	        htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	               (nchan & WL_SCAN_PARAMS_COUNT_MASK));
+
+	nssid = (uint)
+	        ((iscan->iscan_ex_params_p->params.channel_num >> WL_SCAN_PARAMS_NSSID_SHIFT) &
+	         WL_SCAN_PARAMS_COUNT_MASK);
+
+	
+	params_size = (int) (p - (char*)iscan->iscan_ex_params_p + nssid * sizeof(wlc_ssid_t));
+	iscan->iscan_ex_param_size = params_size;
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+	wl_iw_set_event_mask(dev);
+	mod_timer(&iscan->timer, jiffies + iscan->timer_ms*HZ/1000);
+
+	iscan->timer_on = 1;
+
+#ifdef SCAN_DUMP
+	{
+		int i;
+		WL_SCAN(("\n### List of SSIDs to scan ###\n"));
+		for (i = 0; i < nssid; i++) {
+			if (!ssids_local[i].SSID_len)
+				WL_SCAN(("%d: Broadcast scan\n", i));
+			else
+			WL_SCAN(("%d: scan  for  %s size =%d\n", i,
+				ssids_local[i].SSID, ssids_local[i].SSID_len));
+		}
+		WL_SCAN(("### List of channels to scan ###\n"));
+		for (i = 0; i < nchan; i++)
+		{
+			WL_SCAN(("%d ", iscan->iscan_ex_params_p->params.channel_list[i]));
+		}
+		WL_SCAN(("\nnprobes=%d\n", iscan->iscan_ex_params_p->params.nprobes));
+		WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+		WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+		WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+		WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+		WL_SCAN(("\n###################\n"));
+	}
+#endif 
+
+	if (params_size > WLC_IOCTL_MEDLEN) {
+		WL_ERROR(("%s: params_size=%d exceeds WLC_IOCTL_MEDLEN\n",
+			__FUNCTION__, params_size));
+		err = -1;
+	}
+
+	if ((err = dev_iw_iovar_setbuf(dev, "iscan", iscan->iscan_ex_params_p,
+	                               iscan->iscan_ex_param_size,
+	                               iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+		WL_TRACE(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+		err = -1;
+	}
+
+exit:
+	return err;
+}
+
+
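+/*
+ * iwpriv combined-scan entry point: copy the ASCII argument from user space,
+ * parse the SSID list, channel list and dwell-time parameters, then start
+ * the scan via wl_iw_combined_scan_set().
+ */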
+static int
+iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info,
+                 union iwreq_data *wrqu, char *ext)
+{
+	int res;
+	char  *extra = NULL;
+	iscan_info_t *iscan = g_iscan;
+	wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+	int nssid = 0;
+	int nchan = 0;
+	char *str_ptr;
+
+	WL_TRACE(("%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	if (wrqu->data.length == 0) {
+		WL_ERROR(("IWPRIV argument len = 0\n"));
+		return -EINVAL;
+	}
+
+	if (!iscan->iscan_ex_params_p) {
+		return -EFAULT;
+	}
+
+	if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+		return -ENOMEM;
+
+	if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+		res = -EFAULT;
+		goto exit_proc;
+	}
+
+	extra[wrqu->data.length] = 0;
+	WL_ERROR(("Got str param in iw_point:\n %s\n", extra));
+
+	str_ptr = extra;
+
+	
+	if (strncmp(str_ptr, GET_SSID, strlen(GET_SSID))) {
+		WL_ERROR(("%s Error: extracting SSID='' string\n", __FUNCTION__));
+		res = -EINVAL;
+		goto exit_proc;
+	}
+
+	str_ptr += strlen(GET_SSID);
+	nssid = wl_iw_parse_ssid_list(&str_ptr, ssids_local, nssid,
+	                              WL_SCAN_PARAMS_SSID_MAX);
+	if (nssid == -1) {
+		WL_ERROR(("%s wrong ssid list", __FUNCTION__));
+		res = -EINVAL;
+		goto exit_proc;
+	}
+
+	memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+	ASSERT(iscan->iscan_ex_param_size < WLC_IOCTL_MAXLEN);
+
+	
+	wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+	iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+	iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+	iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+	
+	if ((nchan = wl_iw_parse_channel_list(&str_ptr,
+	                                      &iscan->iscan_ex_params_p->params.channel_list[0],
+	                                      WL_NUMCHANNELS)) == -1) {
+		WL_ERROR(("%s missing channel list\n", __FUNCTION__));
+		res = -EINVAL;
+		goto exit_proc;
+	}
+
+	
+	get_parameter_from_string(&str_ptr,
+	                          GET_NPROBE, PTYPE_INTDEC,
+	                          &iscan->iscan_ex_params_p->params.nprobes, 2);
+
+	get_parameter_from_string(&str_ptr, GET_ACTIVE_ASSOC_DWELL, PTYPE_INTDEC,
+	                          &iscan->iscan_ex_params_p->params.active_time, 4);
+
+	get_parameter_from_string(&str_ptr, GET_PASSIVE_ASSOC_DWELL, PTYPE_INTDEC,
+	                          &iscan->iscan_ex_params_p->params.passive_time, 4);
+
+	get_parameter_from_string(&str_ptr, GET_HOME_DWELL, PTYPE_INTDEC,
+	                          &iscan->iscan_ex_params_p->params.home_time, 4);
+
+	get_parameter_from_string(&str_ptr, GET_SCAN_TYPE, PTYPE_INTDEC,
+	                          &iscan->iscan_ex_params_p->params.scan_type, 1);
+
+	
+	res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+exit_proc:
+	kfree(extra);
+
+	return res;
+}
+
+
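+/*
+ * TLV-encoded CSCAN handler: validate the prefix/version header, then walk
+ * the TLVs for SSIDs, channels, nprobes, dwell times and scan type before
+ * starting the combined scan.
+ */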
+static int
+wl_iw_set_cscan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int res = -1;
+	iscan_info_t *iscan = g_iscan;
+	wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX];
+	int nssid = 0;
+	int nchan = 0;
+	cscan_tlv_t *cscan_tlv_temp;
+	char type;
+	char *str_ptr;
+	int tlv_size_left;
+#ifdef TLV_DEBUG
+	int i;
+	char tlv_in_example[] = {
+		'C', 'S', 'C', 'A', 'N', ' ',
+		0x53, 0x01, 0x00, 0x00,
+		'S',	  
+		0x00, 
+		'S',    
+		0x04, 
+		'B', 'R', 'C', 'M',
+		'C',
+		0x06, 
+		'P', 
+		0x94,
+		0x11,
+		'T',     
+		0x01  
+	};
+#endif 
+
+	WL_TRACE(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n",
+		__FUNCTION__, info->cmd, info->flags,
+		wrqu->data.pointer, wrqu->data.length));
+
+	net_os_wake_lock(dev);
+
+	if (g_onoff == G_WLAN_SET_OFF) {
+		WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__));
+		return -1;
+	}
+
+	if (wrqu->data.length < (strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))) {
+		WL_ERROR(("%s argument length %d is less than required %d\n", __FUNCTION__,
+			wrqu->data.length, strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t)));
+		return -1;
+	}
+
+#ifdef TLV_DEBUG
+	memcpy(extra, tlv_in_example, sizeof(tlv_in_example));
+	wrqu->data.length = sizeof(tlv_in_example);
+	for (i = 0; i < wrqu->data.length; i++)
+		printf("%02X ", extra[i]);
+	printf("\n");
+#endif 
+
+	str_ptr = extra;
+	str_ptr +=  strlen(CSCAN_COMMAND);
+	tlv_size_left = wrqu->data.length - strlen(CSCAN_COMMAND);
+
+	cscan_tlv_temp = (cscan_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+	
+	if ((cscan_tlv_temp->prefix == CSCAN_TLV_PREFIX) &&
+		(cscan_tlv_temp->version == CSCAN_TLV_VERSION) &&
+		(cscan_tlv_temp->subver == CSCAN_TLV_SUBVERSION))
+	{
+		str_ptr += sizeof(cscan_tlv_t);
+		tlv_size_left  -= sizeof(cscan_tlv_t);
+
+		
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+			WL_SCAN_PARAMS_SSID_MAX, &tlv_size_left)) <= 0) {
+			WL_ERROR(("SSID list is missing or corrupted, ret=%d\n", nssid));
+			goto exit_proc;
+		}
+		else {
+			
+			memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size);
+
+			
+			wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL);
+			iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+			iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START);
+			iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+			
+			while (tlv_size_left > 0)
+			{
+			type = str_ptr[0];
+			switch (type) {
+				case CSCAN_TLV_TYPE_CHANNEL_IE:
+					
+					if ((nchan = wl_iw_parse_channel_list_tlv(&str_ptr,
+					&iscan->iscan_ex_params_p->params.channel_list[0],
+					WL_NUMCHANNELS, &tlv_size_left)) == -1) {
+					WL_ERROR(("%s missing channel list\n",
+						__FUNCTION__));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_NPROBE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr,
+						&iscan->iscan_ex_params_p->params.nprobes,
+						sizeof(iscan->iscan_ex_params_p->params.nprobes),
+						type, sizeof(char), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n",
+							__FUNCTION__, res));
+							goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_ACTIVE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr,
+					&iscan->iscan_ex_params_p->params.active_time,
+					sizeof(iscan->iscan_ex_params_p->params.active_time),
+					type, sizeof(short), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n",
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_PASSIVE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr,
+					&iscan->iscan_ex_params_p->params.passive_time,
+					sizeof(iscan->iscan_ex_params_p->params.passive_time),
+					type, sizeof(short), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n",
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_HOME_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr,
+					&iscan->iscan_ex_params_p->params.home_time,
+					sizeof(iscan->iscan_ex_params_p->params.home_time),
+					type, sizeof(short), &tlv_size_left)) == -1) {
+						WL_ERROR(("%s return %d\n",
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+				case CSCAN_TLV_TYPE_STYPE_IE:
+					if ((res = wl_iw_parse_data_tlv(&str_ptr,
+					&iscan->iscan_ex_params_p->params.scan_type,
+					sizeof(iscan->iscan_ex_params_p->params.scan_type),
+					type, sizeof(char), &tlv_size_left)) == -1) {
+					WL_ERROR(("%s return %d\n",
+						__FUNCTION__, res));
+						goto exit_proc;
+					}
+				break;
+
+				default :
+					WL_ERROR(("%s got unknown TLV type %X\n",
+					__FUNCTION__, type));
+					goto exit_proc;
+				break;
+				}
+			} 
+			}
+		}
+		else {
+			WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+			goto exit_proc;
+		}
+
+#if defined(CONFIG_FIRST_SCAN)
+		if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) {
+			if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) {
+				
+				WL_ERROR(("%s Clean up First scan flag which is %d\n",
+					__FUNCTION__, g_first_broadcast_scan));
+				g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED;
+			}
+			else {
+				WL_ERROR(("%s Ignoring CSCAN : First Scan is not done yet %d\n",
+					__FUNCTION__, g_first_counter_scans));
+				return -EBUSY;
+			}
+		}
+#endif 
+
+		
+		res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan);
+
+exit_proc:
+	net_os_wake_unlock(dev);
+	return res;
+}
+
+#endif 
+
+
+
+#ifdef SOFTAP
+#ifndef AP_ONLY
+
+
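+/*
+ * Worker thread that waits for the secondary SoftAP net_device to be
+ * registered and then reports "AP_SET_CFG_OK" to user space.
+ */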
+static int
+thr_wait_for_2nd_eth_dev(void *data)
+{
+	wl_iw_t *iw;
+	int ret = 0;
+	unsigned long flags = 0;
+
+	tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data;
+	struct net_device *dev = (struct net_device *)tsk_ctl->parent;
+	iw = *(wl_iw_t **)netdev_priv(dev);
+
+	DAEMONIZE("wl0_eth_wthread");
+
+
+	WL_SOFTAP(("\n>%s thread started, PID:%x\n", __FUNCTION__, current->pid));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (!iw) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		tsk_ctl->thr_pid = -1;
+		complete(&tsk_ctl->completed);
+		return -1;
+	}
+	DHD_OS_WAKE_LOCK(iw->pub);
+	complete(&tsk_ctl->completed);
+	if (down_timeout(&tsk_ctl->sema,  msecs_to_jiffies(5000)) != 0) {
+#else
+	if (down_interruptible(&tsk_ctl->sema) != 0) {
+#endif 
+		WL_ERROR(("\n%s: sap_eth_sema timeout \n", __FUNCTION__));
+		ret = -1;
+		goto fail;
+	}
+
+	SMP_RD_BARRIER_DEPENDS();
+	if (tsk_ctl->terminated) {
+			ret = -1;
+			goto fail;
+	}
+
+	flags = dhd_os_spin_lock(iw->pub);
+	if (!ap_net_dev) {
+		WL_ERROR((" ap_net_dev is null !!!"));
+		ret = -1;
+		dhd_os_spin_unlock(iw->pub, flags);
+		goto fail;
+	}
+
+	WL_TRACE(("\n>%s: Thread:'softap ethdev IF:%s is detected !!!'\n\n",
+		__FUNCTION__, ap_net_dev->name));
+
+	ap_cfg_running = TRUE;
+
+	dhd_os_spin_unlock(iw->pub, flags);
+	bcm_mdelay(500); 
+
+	
+	wl_iw_send_priv_event(priv_dev, "AP_SET_CFG_OK");
+
+fail:
+
+	DHD_OS_WAKE_UNLOCK(iw->pub);
+
+	WL_TRACE(("\n>%s, thread completed\n", __FUNCTION__));
+
+	complete_and_exit(&tsk_ctl->completed, 0);
+	return ret;
+}
+#endif 
+#ifndef AP_ONLY
+static int last_auto_channel = 6;
+#endif
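+/*
+ * Run the firmware auto channel selection (WLC_START_CHANNEL_SEL /
+ * WLC_GET_CHANNEL_SEL with retries) and store the chosen channel in the
+ * AP profile.
+ */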
+static int
+get_softap_auto_channel(struct net_device *dev, struct ap_profile *ap)
+{
+	int chosen = 0;
+	wl_uint32_list_t request;
+	int rescan = 0;
+	int retry = 0;
+	int updown = 0;
+	int ret = 0;
+	wlc_ssid_t null_ssid;
+	int res = 0;
+#ifndef AP_ONLY
+	int iolen = 0;
+	int mkvar_err = 0;
+	int bsscfg_index = 1;
+	char buf[WLC_IOCTL_SMLEN];
+#endif
+	WL_SOFTAP(("Enter %s\n", __FUNCTION__));
+
+#ifndef AP_ONLY
+	if (ap_cfg_running) {
+		ap->channel = last_auto_channel;
+		return res;
+	}
+#endif
+	memset(&null_ssid, 0, sizeof(wlc_ssid_t));
+	res |= dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown));
+#ifdef AP_ONLY
+	res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &null_ssid, sizeof(null_ssid));
+#else
+	iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&null_ssid),
+		null_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+	ASSERT(iolen);
+	res |= dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen);
+#endif
+	auto_channel_retry:
+			request.count = htod32(0);
+			ret = dev_wlc_ioctl(dev, WLC_START_CHANNEL_SEL, &request, sizeof(request));
+			if (ret < 0) {
+				WL_ERROR(("can't start auto channel scan\n"));
+				goto fail;
+			}
+
+	get_channel_retry:
+			bcm_mdelay(500);
+
+			ret = dev_wlc_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
+			if (ret < 0 || dtoh32(chosen) == 0) {
+				if (retry++ < 3)
+					goto get_channel_retry;
+				else {
+					WL_ERROR(("can't get auto channel sel, err = %d, "
+					          "chosen = %d\n", ret, chosen));
+					goto fail;
+				}
+			}
+			if ((chosen == 1) && (!rescan++))
+				goto auto_channel_retry;
+			WL_SOFTAP(("Set auto channel = %d\n", chosen));
+			ap->channel = chosen;
+			if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown))) < 0) {
+				WL_ERROR(("%s fail to bring interface down, err=%d\n", __FUNCTION__, res));
+				goto fail;
+			}
+#ifndef AP_ONLY
+	if (!res)
+		last_auto_channel = ap->channel;
+#endif
+
+fail:
+	return res;
+} 
+
+
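+/*
+ * Apply an AP profile: bring the interface down, switch to AP/APSTA mode,
+ * set channel, maxassoc and SSID, then (re)start the SoftAP BSS.
+ */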
+static int
+set_ap_cfg(struct net_device *dev, struct ap_profile *ap)
+{
+	int updown = 0;
+	int channel = 0;
+
+	wlc_ssid_t ap_ssid;
+	int max_assoc = 8;
+
+	int res = 0;
+	int apsta_var = 0;
+#ifndef AP_ONLY
+	int mpc = 0;
+	int iolen = 0;
+	int mkvar_err = 0;
+	int bsscfg_index = 1;
+	char buf[WLC_IOCTL_SMLEN];
+#endif
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	net_os_wake_lock(dev);
+	DHD_OS_MUTEX_LOCK(&wl_softap_lock);
+
+	WL_SOFTAP(("wl_iw: set ap profile:\n"));
+	WL_SOFTAP(("	ssid = '%s'\n", ap->ssid));
+	WL_SOFTAP(("	security = '%s'\n", ap->sec));
+	if (ap->key[0] != '\0')
+		WL_SOFTAP(("	key = '%s'\n", ap->key));
+	WL_SOFTAP(("	channel = %d\n", ap->channel));
+	WL_SOFTAP(("	max scb = %d\n", ap->max_scb));
+
+#ifdef AP_ONLY
+	if (ap_cfg_running) {
+		wl_iw_softap_deassoc_stations(dev, NULL); 
+		ap_cfg_running = FALSE;
+	}
+#endif	
+
+	
+	if (ap_cfg_running == FALSE) {
+
+#ifndef AP_ONLY
+
+		
+		sema_init(&ap_eth_ctl.sema, 0);
+
+		mpc = 0;
+		if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) {
+			WL_ERROR(("%s fail to set mpc\n", __FUNCTION__));
+			goto fail;
+		}
+#endif
+
+		updown = 0;
+		if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown)))) {
+			WL_ERROR(("%s fail to set updown\n", __FUNCTION__));
+			goto fail;
+		}
+
+#ifdef AP_ONLY
+		
+		apsta_var = 0;
+		if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+			WL_ERROR(("%s fail to set apsta_var 0\n", __FUNCTION__));
+			goto fail;
+		}
+		apsta_var = 1;
+		if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) {
+			WL_ERROR(("%s fail to set apsta_var 1\n", __FUNCTION__));
+			goto fail;
+		}
+		res = dev_wlc_ioctl(dev, WLC_GET_AP, &apsta_var, sizeof(apsta_var));
+#else
+		
+		apsta_var = 1;
+		iolen = wl_bssiovar_mkbuf("apsta",
+			bsscfg_index,  &apsta_var, sizeof(apsta_var)+4,
+			buf, sizeof(buf), &mkvar_err);
+		ASSERT(iolen);
+		if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+			WL_ERROR(("%s fail to set apsta \n", __FUNCTION__));
+			goto fail;
+		}
+		WL_TRACE(("\n>in %s: apsta set result: %d \n", __FUNCTION__, res));
+#endif 
+
+		updown = 1;
+		if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown))) < 0) {
+			WL_ERROR(("%s fail to bring interface up\n", __FUNCTION__));
+			goto fail;
+		}
+
+	} else {
+		
+		if (!ap_net_dev) {
+			WL_ERROR(("%s: ap_net_dev is null\n", __FUNCTION__));
+			goto fail;
+		}
+
+		res = wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+
+		
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+			WL_ERROR(("%s fail to set bss down\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+
+
+	if (strlen(ap->country_code)) {
+		WL_ERROR(("%s: Ignored: country must be specified"
+			" with the COUNTRY command\n", __FUNCTION__));
+	} else {
+		WL_SOFTAP(("%s: Country code is not specified,"
+			" will use Radio's default\n",
+			__FUNCTION__));
+
+	}
+#ifndef AP_ONLY
+	iolen = wl_bssiovar_mkbuf("closednet",
+		bsscfg_index,  &ap->closednet, sizeof(ap->closednet)+4,
+		buf, sizeof(buf), &mkvar_err);
+	ASSERT(iolen);
+	if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) {
+		WL_ERROR(("%s failed to set 'closednet' for apsta\n", __FUNCTION__));
+		goto fail;
+	}
+#endif
+
+	
+	if ((ap->channel == 0) && (get_softap_auto_channel(dev, ap) < 0)) {
+		ap->channel = 1;
+		WL_ERROR(("%s auto channel failed, falling back to channel=%d\n",
+		          __FUNCTION__, ap->channel));
+	}
+
+	channel = ap->channel;
+	if ((res = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel)))) {
+		WL_ERROR(("%s fail to set channel\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (ap_cfg_running == FALSE) {
+		updown = 0;
+		if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)))) {
+			WL_ERROR(("%s fail to set up\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+
+	max_assoc = ap->max_scb;
+	if ((res = dev_wlc_intvar_set(dev, "maxassoc", max_assoc))) {
+		WL_ERROR(("%s fail to set maxassoc\n", __FUNCTION__));
+		goto fail;
+	}
+
+	ap_ssid.SSID_len = strlen(ap->ssid);
+	strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+
+	
+#ifdef AP_ONLY
+	if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+		WL_ERROR(("ERROR:%d in:%s, wl_iw_set_ap_security is skipped\n",
+		          res, __FUNCTION__));
+		goto fail;
+	}
+	wl_iw_send_priv_event(dev, "ASCII_CMD=AP_BSS_START");
+	ap_cfg_running = TRUE;
+#else
+
+	iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&ap_ssid),
+		ap_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err);
+	ASSERT(iolen);
+	if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) != 0) {
+		WL_ERROR(("ERROR:%d in:%s, Security & BSS reconfiguration is skipped\n",
+		          res, __FUNCTION__));
+		goto fail;
+	}
+	if (ap_cfg_running == FALSE) {
+		
+		PROC_START(thr_wait_for_2nd_eth_dev, dev, &ap_eth_ctl, 0);
+	} else {
+		ap_eth_ctl.thr_pid = -1;
+		
+		if (ap_net_dev == NULL) {
+			WL_ERROR(("%s ERROR: ap_net_dev is NULL !!!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		WL_ERROR(("%s: %s Configure security & restart AP bss \n",
+		          __FUNCTION__, ap_net_dev->name));
+
+		
+		if ((res = wl_iw_set_ap_security(ap_net_dev, &my_ap)) < 0) {
+			WL_ERROR(("%s fail to set security : %d\n", __FUNCTION__, res));
+			goto fail;
+		}
+
+		
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) {
+			WL_ERROR(("%s fail to set bss up\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+#endif 
+fail:
+	WL_SOFTAP(("%s exit with %d\n", __FUNCTION__, res));
+
+	DHD_OS_MUTEX_UNLOCK(&wl_softap_lock);
+	net_os_wake_unlock(dev);
+
+	return res;
+}
+#endif 
+
+
+
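+/*
+ * Translate the profile's "open"/"wep"/"wpa-psk"/"wpa2-psk" security string
+ * into wsec/wpa_auth settings; PSK passphrases shorter than 64 characters
+ * are expanded with PBKDF2-SHA1 before being programmed as the PMK.
+ */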
+static int
+wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap)
+{
+	int wsec = 0;
+	int wpa_auth = 0;
+	int res = 0;
+	int i;
+	char *ptr;
+#ifdef AP_ONLY
+	int mpc = 0;
+	wlc_ssid_t ap_ssid;
+#endif
+	wl_wsec_key_t key;
+
+	WL_SOFTAP(("\nsetting SOFTAP security mode:\n"));
+	WL_SOFTAP(("wl_iw: set ap profile:\n"));
+	WL_SOFTAP(("	ssid = '%s'\n", ap->ssid));
+	WL_SOFTAP(("	security = '%s'\n", ap->sec));
+	if (ap->key[0] != '\0')
+		WL_SOFTAP(("	key = '%s'\n", ap->key));
+	WL_SOFTAP(("	channel = %d\n", ap->channel));
+	WL_SOFTAP(("	max scb = %d\n", ap->max_scb));
+
+	if (strnicmp(ap->sec, "open", strlen("open")) == 0) {
+
+	   
+		wsec = 0;
+		res = dev_wlc_intvar_set(dev, "wsec", wsec);
+		wpa_auth = WPA_AUTH_DISABLED;
+		res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & wpa_auth set 'OPEN', result:%d\n", res));
+		WL_SOFTAP(("=====================\n"));
+
+	} else if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+
+	   
+		memset(&key, 0, sizeof(key));
+
+		wsec = WEP_ENABLED;
+		res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+		key.index = 0;
+		if (wl_iw_parse_wep(ap->key, &key)) {
+			WL_SOFTAP(("wep key parse err!\n"));
+			return -1;
+		}
+
+		key.index = htod32(key.index);
+		key.len = htod32(key.len);
+		key.algo = htod32(key.algo);
+		key.flags = htod32(key.flags);
+
+		res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+
+		wpa_auth = WPA_AUTH_DISABLED;
+		res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP(("=====================\n"));
+		WL_SOFTAP((" wsec & auth set 'WEP', result:%d\n", res));
+		WL_SOFTAP(("=====================\n"));
+
+	} else if (strnicmp(ap->sec, "wpa2-psk", strlen("wpa2-psk")) == 0) {
+
+	   
+
+		wsec_pmk_t psk;
+		size_t key_len;
+
+		wsec = AES_ENABLED;
+		dev_wlc_intvar_set(dev, "wsec", wsec);
+
+		key_len = strlen(ap->key);
+		if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+			WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+			WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+			return -1;
+		}
+
+		
+		if (key_len < WSEC_MAX_PSK_LEN) {
+			unsigned char output[2*SHA1HashSize];
+			char key_str_buf[WSEC_MAX_PSK_LEN+1];
+
+			
+			memset(output, 0, sizeof(output));
+			pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+			
+			ptr = key_str_buf;
+			for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+				
+				sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4],
+				        (uint)output[i*4+1], (uint)output[i*4+2],
+				        (uint)output[i*4+3]);
+				ptr += 8;
+			}
+			WL_SOFTAP(("%s: passphrase = %s\n", __FUNCTION__, key_str_buf));
+
+			psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+			memcpy(psk.key, key_str_buf, psk.key_len);
+		} else {
+			psk.key_len = htod16((ushort) key_len);
+			memcpy(psk.key, ap->key, key_len);
+		}
+		psk.flags = htod16(WSEC_PASSPHRASE);
+		dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+		wpa_auth = WPA2_AUTH_PSK;
+		dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+	} else if (strnicmp(ap->sec, "wpa-psk", strlen("wpa-psk")) == 0) {
+
+		
+		wsec_pmk_t psk;
+		size_t key_len;
+
+		wsec = TKIP_ENABLED;
+		res = dev_wlc_intvar_set(dev, "wsec", wsec);
+
+		key_len = strlen(ap->key);
+		if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) {
+			WL_SOFTAP(("passphrase must be between %d and %d characters long\n",
+			WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN));
+			return -1;
+		}
+
+		
+		if (key_len < WSEC_MAX_PSK_LEN) {
+			unsigned char output[2*SHA1HashSize];
+			char key_str_buf[WSEC_MAX_PSK_LEN+1];
+			bzero(output, 2*SHA1HashSize);
+
+			WL_SOFTAP(("%s: do passhash...\n", __FUNCTION__));
+			
+			pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32);
+			
+			ptr = key_str_buf;
+			for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) {
+				WL_SOFTAP(("[%02d]: %08x\n", i, *((unsigned int*)&output[i*4])));
+				
+				sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4],
+					(uint)output[i*4+1], (uint)output[i*4+2],
+				        (uint)output[i*4+3]);
+				ptr += 8;
+			}
+			printk("%s: passphrase = %s\n", __FUNCTION__, key_str_buf);
+
+			psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN);
+			memcpy(psk.key, key_str_buf, psk.key_len);
+		} else {
+			psk.key_len = htod16((ushort) key_len);
+			memcpy(psk.key, ap->key, key_len);
+		}
+
+		psk.flags = htod16(WSEC_PASSPHRASE);
+		res |= dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk));
+
+		wpa_auth = WPA_AUTH_PSK;
+		res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth);
+
+		WL_SOFTAP((" wsec & auth set 'wpa-psk' (TKIP), result:%d\n", res));
+	}
+
+#ifdef AP_ONLY
+		ap_ssid.SSID_len = strlen(ap->ssid);
+		strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len);
+		res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &ap_ssid, sizeof(ap_ssid));
+		mpc = 0;
+		res |= dev_wlc_intvar_set(dev, "mpc", mpc);
+		if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) {
+			res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		}
+#endif
+	return res;
+}
+
+
+
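+/*
+ * Extract a "token=value" parameter from a comma-separated command string
+ * and store it as a decimal integer, hex buffer or plain string, depending
+ * on param_type.
+ */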
+static int
+get_parameter_from_string(
+			char **str_ptr, const char *token,
+			int param_type, void  *dst, int param_max_len)
+{
+	char int_str[7] = "0";
+	int parm_str_len;
+	char  *param_str_begin;
+	char  *param_str_end;
+	char  *orig_str = *str_ptr;
+
+	if ((*str_ptr) && !strncmp(*str_ptr, token, strlen(token))) {
+
+		strsep(str_ptr, "=,"); 
+		param_str_begin = *str_ptr;
+		strsep(str_ptr, "=,"); 
+
+		if (*str_ptr == NULL) {
+			
+			parm_str_len = strlen(param_str_begin);
+		} else {
+			param_str_end = *str_ptr-1;  
+			parm_str_len = param_str_end - param_str_begin;
+		}
+
+		WL_TRACE((" 'token:%s', len:%d, ", token, parm_str_len));
+
+		if (parm_str_len > param_max_len) {
+			WL_ERROR((" WARNING: extracted param len:%d is > MAX:%d\n",
+				parm_str_len, param_max_len));
+
+			parm_str_len = param_max_len;
+		}
+
+		switch (param_type) {
+
+			case PTYPE_INTDEC: {
+			
+				int *pdst_int = dst;
+				char *eptr;
+
+				if (parm_str_len > (int)sizeof(int_str) - 1)
+					parm_str_len = sizeof(int_str) - 1;
+
+				memcpy(int_str, param_str_begin, parm_str_len);
+
+				*pdst_int = simple_strtoul(int_str, &eptr, 10);
+
+				WL_TRACE((" written as integer:%d\n",  *pdst_int));
+			}
+			break;
+			case PTYPE_STR_HEX: {
+				u8 *buf = dst;
+				
+				param_max_len = param_max_len >> 1;  
+				hstr_2_buf(param_str_begin, buf, param_max_len);
+				dhd_print_buf(buf, param_max_len, 0);
+			}
+			break;
+			default:
+				
+				memcpy(dst, param_str_begin, parm_str_len);
+				*((char *)dst + parm_str_len) = 0; 
+				WL_ERROR((" written as a string:%s\n", (char *)dst));
+			break;
+
+		}
+
+		return 0;
+	} else {
+		WL_ERROR(("\n %s: ERROR: can't find token:%s in str:%s \n",
+			__FUNCTION__, token, orig_str));
+
+	 return -1;
+	}
+}
+
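+/*
+ * Deauthenticate a single STA (mac != NULL) or every entry in the current
+ * association list.
+ */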
+static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac)
+{
+	int i;
+	int res = 0;
+	char mac_buf[128] = {0};
+	char z_mac[6] = {0, 0, 0, 0, 0, 0};
+	char *sta_mac;
+	struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+	bool deauth_all = false;
+
+	
+	if (mac == NULL) {
+		deauth_all = true;
+		sta_mac = z_mac;  
+	} else {
+		sta_mac = mac;  
+	}
+
+	memset(assoc_maclist, 0, sizeof(mac_buf));
+	assoc_maclist->count = 8; 
+
+	res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 128);
+	if (res != 0) {
+		WL_SOFTAP(("%s: Error:%d Couldn't get ASSOC List\n", __FUNCTION__, res));
+		return res;
+	}
+
+	if (assoc_maclist->count) {
+		for (i = 0; i < assoc_maclist->count; i++) {
+			scb_val_t scbval;
+
+			scbval.val = htod32(1);
+			bcopy(&assoc_maclist->ea[i], &scbval.ea, ETHER_ADDR_LEN);
+
+			if (deauth_all || (memcmp(&scbval.ea, sta_mac, ETHER_ADDR_LEN) == 0)) {
+				WL_SOFTAP(("%s, deauth STA:%d \n", __FUNCTION__, i));
+				res |= dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+					&scbval, sizeof(scb_val_t));
+			}
+		}
+	} else {
+		WL_SOFTAP(("%s: No Stations \n", __FUNCTION__));
+	}
+
+	if (res != 0) {
+		WL_ERROR(("%s: Error:%d\n", __FUNCTION__, res));
+	} else if (assoc_maclist->count) {
+		
+		bcm_mdelay(200);
+	}
+	return res;
+}
+
+
+
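+/*
+ * AP_BSS_STOP private command: deauthenticate all stations and bring the
+ * SoftAP BSS down.
+ */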
+static int
+iwpriv_softap_stop(struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *ext)
+{
+	int res = 0;
+
+	WL_SOFTAP(("got iwpriv AP_BSS_STOP \n"));
+
+	if ((!dev) && (!ap_net_dev)) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return res;
+	}
+
+	net_os_wake_lock(dev);
+	DHD_OS_MUTEX_LOCK(&wl_softap_lock);
+
+	if (ap_cfg_running == TRUE) {
+#ifdef AP_ONLY
+		 wl_iw_softap_deassoc_stations(dev, NULL);
+#else
+		 wl_iw_softap_deassoc_stations(ap_net_dev, NULL);
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 2)) < 0)
+			WL_ERROR(("%s failed to del BSS err = %d", __FUNCTION__, res));
+#endif
+
+		
+		bcm_mdelay(100);
+
+		wrqu->data.length = 0;
+		ap_cfg_running = FALSE;
+	} else
+		WL_ERROR(("%s: was called when SoftAP is OFF : move on\n", __FUNCTION__));
+
+	WL_SOFTAP(("%s Done with %d\n", __FUNCTION__, res));
+	DHD_OS_MUTEX_UNLOCK(&wl_softap_lock);
+	net_os_wake_unlock(dev);
+
+	return res;
+}
+
+
+
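+/*
+ * FW_RELOAD private command: copy the new firmware path from user space and
+ * remember whether it is an "apsta" build.
+ */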
+static int
+iwpriv_fw_reload(struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *ext)
+{
+	int ret = -1;
+	char extra[256];
+	char *fwstr = fw_path ; 
+
+	WL_SOFTAP(("current firmware_path[]=%s\n", fwstr));
+
+	WL_TRACE((">Got FW_RELOAD cmd:"
+	          "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d, "
+	          "fw_path:%p, len:%d \n",
+	          info->cmd, info->flags,
+	          wrqu->data.pointer, wrqu->data.length, fwstr, strlen(fwstr)));
+
+	if ((wrqu->data.length > 4) && (wrqu->data.length < sizeof(extra))) {
+		char *str_ptr;
+
+		if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) {
+			ret = -EFAULT;
+			goto exit_proc;
+		}
+
+		
+		extra[wrqu->data.length] = '\0';	/* terminate the string before parsing */
+		str_ptr = extra;
+
+		if (get_parameter_from_string(&str_ptr,
+		                              "FW_PATH=", PTYPE_STRING, fwstr, 255) != 0) {
+			WL_ERROR(("Error: extracting FW_PATH='' string\n"));
+			goto exit_proc;
+		}
+
+		if  (strstr(fwstr, "apsta") != NULL) {
+			  WL_SOFTAP(("GOT APSTA FIRMWARE\n"));
+			  ap_fw_loaded = TRUE;
+		} else {
+			WL_SOFTAP(("GOT STA FIRMWARE\n"));
+			ap_fw_loaded = FALSE;
+		}
+
+		WL_SOFTAP(("SET firmware_path[]=%s , str_p:%p\n", fwstr, fwstr));
+		ret = 0;
+	} else {
+		WL_ERROR(("Error: invalid param len:%d\n", wrqu->data.length));
+	}
+
+exit_proc:
+	return ret;
+}
+
+#ifdef SOFTAP
+
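+/*
+ * Loopback test: echo the user-supplied string back to user space as a
+ * private driver event.
+ */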
+static int
+iwpriv_wpasupp_loop_tst(struct net_device *dev,
+            struct iw_request_info *info,
+            union iwreq_data *wrqu,
+            char *ext)
+{
+	int res = 0;
+	char *params = NULL;
+
+	WL_TRACE((">Got IWPRIV  wp_supp loopback cmd test:"
+	          "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n",
+	          info->cmd, info->flags,
+	          wrqu->data.pointer, wrqu->data.length));
+
+	if (wrqu->data.length != 0) {
+
+		if (!(params = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
+			return -ENOMEM;
+
+
+		if (copy_from_user(params, wrqu->data.pointer, wrqu->data.length)) {
+			kfree(params);
+			return -EFAULT;
+		}
+
+		params[wrqu->data.length] = 0;
+		WL_SOFTAP(("\n>> copied from user:\n %s\n", params));
+	} else {
+		WL_ERROR(("ERROR param length is 0\n"));
+		return -EFAULT;
+	}
+
+	
+	res = wl_iw_send_priv_event(dev, params);
+	kfree(params);
+
+	return res;
+}
+#endif 
+
+
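+/*
+ * AP_BSS_START path: re-apply the SoftAP security settings and bring the
+ * cfg-1 BSS up.
+ */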
+static int
+iwpriv_en_ap_bss(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *wrqu,
+	char *extra)
+{
+	int res = 0;
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -1;
+	}
+
+	net_os_wake_lock(dev);
+	DHD_OS_MUTEX_LOCK(&wl_softap_lock);
+
+	WL_TRACE(("%s: rcvd IWPRIV IOCTL:  for dev:%s\n", __FUNCTION__, dev->name));
+
+	
+#ifndef AP_ONLY
+	if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) {
+		WL_ERROR((" %s ERROR setting SOFTAP security in :%d\n", __FUNCTION__, res));
+	}
+	else {
+		
+		if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0)
+			WL_ERROR(("%s fail to set bss up err=%d\n", __FUNCTION__, res));
+		else
+			
+			bcm_mdelay(100);
+	}
+
+#endif 
+	WL_SOFTAP(("%s done with res %d \n", __FUNCTION__, res));
+
+	DHD_OS_MUTEX_UNLOCK(&wl_softap_lock);
+	net_os_wake_unlock(dev);
+
+	return res;
+}
+
+static int
+get_assoc_sta_list(struct net_device *dev, char *buf, int len)
+{
+	
+	WL_TRACE(("%s: dev_wlc_ioctl(dev:%p, cmd:%d, buf:%p, len:%d)\n",
+		__FUNCTION__, dev, WLC_GET_ASSOCLIST, buf, len));
+
+	return dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, buf, len);
+
+}
+
+
+void check_error(int res, const char *msg, const char *func, int line)
+{
+	if (res != 0)
+		WL_ERROR(("%s, %d function:%s, line:%d\n", msg, res, func, line));
+}
+
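+/*
+ * Program the firmware MAC allow/deny list and immediately deauthenticate
+ * any associated station that violates the new policy.
+ */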
+static int
+set_ap_mac_list(struct net_device *dev, void *buf)
+{
+	struct mac_list_set *mac_list_set = (struct mac_list_set *)buf;
+	struct maclist *maclist = (struct maclist *)&mac_list_set->mac_list;
+	int length;
+	int i;
+	int mac_mode = mac_list_set->mode;
+	int ioc_res = 0;
+	ap_macmode = mac_list_set->mode;  
+
+	
+	bzero(&ap_black_list, sizeof(struct mflist));
+
+	if (mac_mode == MACLIST_MODE_DISABLED) {
+		
+		ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+		check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+		WL_SOFTAP(("%s: MAC filtering disabled\n", __FUNCTION__));
+	} else {
+		
+		scb_val_t scbval;
+		char mac_buf[256] = {0};
+		struct maclist *assoc_maclist = (struct maclist *) mac_buf;
+
+		
+		bcopy(maclist, &ap_black_list, sizeof(ap_black_list));
+
+		
+		ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode));
+		check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+
+		
+		length = sizeof(maclist->count) + maclist->count*ETHER_ADDR_LEN;
+		dev_wlc_ioctl(dev, WLC_SET_MACLIST, maclist, length);
+
+		WL_SOFTAP(("%s: applied MAC List, mode:%d, length %d:\n",
+			__FUNCTION__, mac_mode, length));
+
+		for (i = 0; i < maclist->count; i++)
+			WL_SOFTAP(("mac %d: %02X:%02X:%02X:%02X:%02X:%02X\n",
+				i, maclist->ea[i].octet[0], maclist->ea[i].octet[1],
+				maclist->ea[i].octet[2],
+				maclist->ea[i].octet[3], maclist->ea[i].octet[4],
+				maclist->ea[i].octet[5]));
+
+		
+		assoc_maclist->count = 8;
+		ioc_res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 256);
+		check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__);
+		WL_SOFTAP((" Cur assoc clients:%d\n", assoc_maclist->count));
+
+		
+		if (assoc_maclist->count)
+			for (i = 0; i < assoc_maclist->count; i++) {
+				int j;
+				bool assoc_mac_matched = false;
+				
+				WL_SOFTAP(("\n Checking assoc STA: "));
+				dhd_print_buf(&assoc_maclist->ea[i], 6, 7);
+				WL_SOFTAP(("with the b/w list:"));
+
+				for (j = 0; j < maclist->count; j++)
+					if (!bcmp(&assoc_maclist->ea[i], &maclist->ea[j],
+						ETHER_ADDR_LEN)) {
+						
+						assoc_mac_matched = true;
+						break;
+					}
+
+				
+				if (((mac_mode == MACLIST_MODE_ALLOW) && !assoc_mac_matched) ||
+					((mac_mode == MACLIST_MODE_DENY) && assoc_mac_matched)) {
+
+					WL_SOFTAP(("allow-list miss or deny-list hit,"
+						" do deauth/disassoc \n"));
+					scbval.val = htod32(1);
+					bcopy(&assoc_maclist->ea[i], &scbval.ea,
+						ETHER_ADDR_LEN);
+					ioc_res = dev_wlc_ioctl(dev,
+						WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+						&scbval, sizeof(scb_val_t));
+					check_error(ioc_res, "ioctl ERROR:",
+						__FUNCTION__, __LINE__);
+				} else {
+					WL_SOFTAP((" STA passes the MAC filter, leave it associated\n"));
+				}
+		} else {
+			WL_SOFTAP(("No ASSOC CLIENTS\n"));
+		} 
+
+	} 
+
+	WL_SOFTAP(("%s iocres:%d\n", __FUNCTION__, ioc_res));
+	return ioc_res;
+}
+#endif 
+
+
+
+#ifdef SOFTAP
+#define PARAM_OFFSET PROFILE_OFFSET
+
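+/*
+ * Dispatcher for "ASCII_CMD=" private commands (AP_CFG, AP_BSS_START,
+ * ASSOC_LST, AP_BSS_STOP).
+ */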
+static int
+wl_iw_process_private_ascii_cmd(
+			struct net_device *dev,
+			struct iw_request_info *info,
+			union iwreq_data *dwrq,
+			char *cmd_str)
+{
+	int ret = 0;
+	char *sub_cmd = cmd_str + PROFILE_OFFSET + strlen("ASCII_CMD=");
+
+	WL_SOFTAP(("\n %s: ASCII_CMD: offs_0:%s, offset_32:\n'%s'\n",
+		__FUNCTION__, cmd_str, cmd_str + PROFILE_OFFSET));
+
+	if (strnicmp(sub_cmd, "AP_CFG", strlen("AP_CFG")) == 0) {
+
+		WL_SOFTAP((" AP_CFG \n"));
+
+		
+		if (init_ap_profile_from_string(cmd_str+PROFILE_OFFSET, &my_ap) != 0) {
+				WL_ERROR(("ERROR: invalid SoftAP CFG params!\n"));
+				ret = -1;
+		} else {
+			ret = set_ap_cfg(dev, &my_ap);
+		}
+
+	} else if (strnicmp(sub_cmd, "AP_BSS_START", strlen("AP_BSS_START")) == 0) {
+
+		WL_SOFTAP(("\n SOFTAP - ENABLE BSS \n"));
+
+		
+		WL_SOFTAP(("\n!!! got 'WL_AP_EN_BSS' from WPA supplicant, dev:%s\n", dev->name));
+
+#ifndef AP_ONLY
+		if (ap_net_dev == NULL) {
+				 printf("\n ERROR: SOFTAP net_dev* is NULL !!!\n");
+		} else {
+			  
+			if ((ret = iwpriv_en_ap_bss(ap_net_dev, info, dwrq, cmd_str)) < 0)
+				WL_ERROR(("%s line %d fail to set bss up\n",
+					__FUNCTION__, __LINE__));
+		}
+#else
+		if ((ret = iwpriv_en_ap_bss(dev, info, dwrq, cmd_str)) < 0)
+				WL_ERROR(("%s line %d fail to set bss up\n",
+					__FUNCTION__, __LINE__));
+#endif
+	} else if (strnicmp(sub_cmd, "ASSOC_LST", strlen("ASSOC_LST")) == 0) {
+
+		
+
+	} else if (strnicmp(sub_cmd, "AP_BSS_STOP", strlen("AP_BSS_STOP")) == 0) {
+
+		WL_SOFTAP(("\n temporarily bringing SOFTAP BSS down\n"));
+#ifndef AP_ONLY
+		if ((ret = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) {
+				WL_ERROR(("%s line %d fail to set bss down\n",
+					__FUNCTION__, __LINE__));
+		}
+#endif
+	}
+
+	return ret;
+
+}
+#endif 
+
+
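+/*
+ * SIOCSIWPRIV handler: copy the command string from user space and dispatch
+ * on its prefix (START/STOP, SCAN-*, RSSI, LINKSPEED, CSCAN, SoftAP
+ * commands, ...).
+ */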
+static int
+wl_iw_set_priv(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *ext
+)
+{
+	int ret = 0;
+	char * extra;
+
+	if (!(extra = kmalloc(dwrq->length, GFP_KERNEL)))
+	    return -ENOMEM;
+
+	if (copy_from_user(extra, dwrq->pointer, dwrq->length)) {
+	    kfree(extra);
+	    return -EFAULT;
+	}
+
+	WL_TRACE(("%s: SIOCSIWPRIV request %s, info->cmd:%x, info->flags:%d\n dwrq->length:%d\n",
+		dev->name, extra, info->cmd, info->flags, dwrq->length));
+
+	
+
+	net_os_wake_lock(dev);
+
+	if (dwrq->length && extra) {
+		if (strnicmp(extra, "START", strlen("START")) == 0) {
+			wl_iw_control_wl_on(dev, info);
+			WL_TRACE(("%s, Received regular START command\n", __FUNCTION__));
+		}
+
+		if (g_onoff == G_WLAN_SET_OFF) {
+			WL_TRACE(("%s, missing START, Fail\n", __FUNCTION__));
+			kfree(extra);
+			net_os_wake_unlock(dev);
+			return -EFAULT;
+		}
+
+	    if (strnicmp(extra, "SCAN-ACTIVE", strlen("SCAN-ACTIVE")) == 0) {
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+			WL_TRACE(("%s: active scan setting suppressed\n", dev->name));
+#else
+			ret = wl_iw_set_active_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif 
+	    }
+	    else if (strnicmp(extra, "SCAN-PASSIVE", strlen("SCAN-PASSIVE")) == 0)
+#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS
+			WL_TRACE(("%s: passive scan setting suppressed\n", dev->name));
+#else
+			ret = wl_iw_set_passive_scan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif 
+	    else if (strnicmp(extra, "RSSI", strlen("RSSI")) == 0)
+			ret = wl_iw_get_rssi(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, "LINKSPEED", strlen("LINKSPEED")) == 0)
+			ret = wl_iw_get_link_speed(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, "MACADDR", strlen("MACADDR")) == 0)
+			ret = wl_iw_get_macaddr(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, "COUNTRY", strlen("COUNTRY")) == 0)
+			ret = wl_iw_set_country(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, "STOP", strlen("STOP")) == 0)
+			ret = wl_iw_control_wl_off(dev, info);
+	    else if (strnicmp(extra, BAND_GET_CMD, strlen(BAND_GET_CMD)) == 0)
+			ret = wl_iw_get_band(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, BAND_SET_CMD, strlen(BAND_SET_CMD)) == 0)
+			ret = wl_iw_set_band(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, DTIM_SKIP_GET_CMD, strlen(DTIM_SKIP_GET_CMD)) == 0)
+			ret = wl_iw_get_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, DTIM_SKIP_SET_CMD, strlen(DTIM_SKIP_SET_CMD)) == 0)
+			ret = wl_iw_set_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, SETSUSPEND_CMD, strlen(SETSUSPEND_CMD)) == 0)
+			ret = wl_iw_set_suspend(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, TXPOWER_SET_CMD, strlen(TXPOWER_SET_CMD)) == 0)
+			ret = wl_iw_set_txpower(dev, info, (union iwreq_data *)dwrq, extra);
+#if defined(PNO_SUPPORT)
+	    else if (strnicmp(extra, PNOSSIDCLR_SET_CMD, strlen(PNOSSIDCLR_SET_CMD)) == 0)
+			ret = wl_iw_set_pno_reset(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, PNOSETUP_SET_CMD, strlen(PNOSETUP_SET_CMD)) == 0)
+			ret = wl_iw_set_pno_set(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, PNOENABLE_SET_CMD, strlen(PNOENABLE_SET_CMD)) == 0)
+			ret = wl_iw_set_pno_enable(dev, info, (union iwreq_data *)dwrq, extra);
+#endif 
+#if defined(CSCAN)
+	    
+	    else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0)
+			ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra);
+#endif 
+	    else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0)
+			ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0)
+			ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra);
+	    else if (strnicmp(extra, "GETPOWER", strlen("GETPOWER")) == 0)
+			ret = wl_iw_get_power_mode(dev, info, (union iwreq_data *)dwrq, extra);
+#ifdef SOFTAP
+	    else if (strnicmp(extra, "ASCII_CMD", strlen("ASCII_CMD")) == 0) {
+	        
+		    wl_iw_process_private_ascii_cmd(dev, info, (union iwreq_data *)dwrq, extra);
+	    }
+		else if (strnicmp(extra, "AP_MAC_LIST_SET", strlen("AP_MAC_LIST_SET")) == 0) {
+			WL_SOFTAP(("set AP_MAC_LIST_SET\n"));
+			set_ap_mac_list(dev, (extra + PROFILE_OFFSET));
+	    }
+#endif 
+	    else {
+			WL_ERROR(("Unknown PRIVATE command %s - ignored\n", extra));
+			snprintf(extra, MAX_WX_STRING, "OK");
+			dwrq->length = strlen("OK") + 1;
+		}
+	}
+
+	net_os_wake_unlock(dev);
+
+	if (extra) {
+	    if (copy_to_user(dwrq->pointer, extra, dwrq->length)) {
+			kfree(extra);
+			return -EFAULT;
+	    }
+
+	    kfree(extra);
+	}
+
+	return ret;
+}
+
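+/*
+ * Standard Wireless Extensions handler table, indexed by ioctl number
+ * relative to SIOCIWFIRST; NULL entries mark unsupported commands.
+ */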
+static const iw_handler wl_iw_handler[] =
+{
+	(iw_handler) wl_iw_config_commit,	
+	(iw_handler) wl_iw_get_name,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_freq,		
+	(iw_handler) wl_iw_get_freq,		
+	(iw_handler) wl_iw_set_mode,		
+	(iw_handler) wl_iw_get_mode,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_get_range,		
+	(iw_handler) wl_iw_set_priv,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_spy,		
+	(iw_handler) wl_iw_get_spy,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_wap,		
+	(iw_handler) wl_iw_get_wap,		
+#if WIRELESS_EXT > 17
+	(iw_handler) wl_iw_mlme,		
+#else
+	(iw_handler) NULL,			
+#endif
+#if defined(WL_IW_USE_ISCAN)
+	(iw_handler) wl_iw_iscan_get_aplist,	
+#else
+	(iw_handler) wl_iw_get_aplist,		
+#endif 
+#if WIRELESS_EXT > 13
+#if defined(WL_IW_USE_ISCAN)
+	(iw_handler) wl_iw_iscan_set_scan,	
+	(iw_handler) wl_iw_iscan_get_scan,	
+#else
+	(iw_handler) wl_iw_set_scan,		
+	(iw_handler) wl_iw_get_scan,		
+#endif
+#else	
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+#endif	
+	(iw_handler) wl_iw_set_essid,		
+	(iw_handler) wl_iw_get_essid,		
+	(iw_handler) wl_iw_set_nick,		
+	(iw_handler) wl_iw_get_nick,		
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_rate,		
+	(iw_handler) wl_iw_get_rate,		
+	(iw_handler) wl_iw_set_rts,		
+	(iw_handler) wl_iw_get_rts,		
+	(iw_handler) wl_iw_set_frag,		
+	(iw_handler) wl_iw_get_frag,		
+	(iw_handler) wl_iw_set_txpow,		
+	(iw_handler) wl_iw_get_txpow,		
+#if WIRELESS_EXT > 10
+	(iw_handler) wl_iw_set_retry,		
+	(iw_handler) wl_iw_get_retry,		
+#endif 
+	(iw_handler) wl_iw_set_encode,		
+	(iw_handler) wl_iw_get_encode,		
+	(iw_handler) wl_iw_set_power,		
+	(iw_handler) wl_iw_get_power,		
+#if WIRELESS_EXT > 17
+	(iw_handler) NULL,			
+	(iw_handler) NULL,			
+	(iw_handler) wl_iw_set_wpaie,		
+	(iw_handler) wl_iw_get_wpaie,		
+	(iw_handler) wl_iw_set_wpaauth,		
+	(iw_handler) wl_iw_get_wpaauth,		
+	(iw_handler) wl_iw_set_encodeext,	
+	(iw_handler) wl_iw_get_encodeext,	
+	(iw_handler) wl_iw_set_pmksa,			
+#endif 
+};
+
+#if WIRELESS_EXT > 12
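+/*
+ * Private ioctl handlers; slot positions must match the command numbers
+ * declared in wl_iw_priv_args below.
+ */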
+static const iw_handler wl_iw_priv_handler[] = {
+	NULL,
+	(iw_handler)wl_iw_set_active_scan,
+	NULL,
+	(iw_handler)wl_iw_get_rssi,
+	NULL,
+	(iw_handler)wl_iw_set_passive_scan,
+	NULL,
+	(iw_handler)wl_iw_get_link_speed,
+	NULL,
+	(iw_handler)wl_iw_get_macaddr,
+	NULL,
+	(iw_handler)wl_iw_control_wl_off,
+	NULL,
+	(iw_handler)wl_iw_control_wl_on,
+#ifdef SOFTAP       
+
+	
+	NULL,
+	(iw_handler)iwpriv_set_ap_config,
+
+	
+	
+	NULL,
+	(iw_handler)iwpriv_get_assoc_list,
+
+	
+	NULL,
+	(iw_handler)iwpriv_set_mac_filters,
+
+	
+	NULL,
+	(iw_handler)iwpriv_en_ap_bss,
+
+	
+	NULL,
+	(iw_handler)iwpriv_wpasupp_loop_tst,
+	
+	NULL,
+	(iw_handler)iwpriv_softap_stop,
+	
+	NULL,
+	(iw_handler)iwpriv_fw_reload,
+	NULL, 
+	(iw_handler)iwpriv_set_ap_sta_disassoc,
+#endif 
+#if defined(CSCAN)
+	
+	NULL,
+	(iw_handler)iwpriv_set_cscan
+#endif 	
+};
+
+static const struct iw_priv_args wl_iw_priv_args[] =
+{
+	{	
+		WL_IW_SET_ACTIVE_SCAN,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"SCAN-ACTIVE"
+	},
+	{
+		WL_IW_GET_RSSI,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"RSSI"
+	},
+	{
+		WL_IW_SET_PASSIVE_SCAN,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"SCAN-PASSIVE"
+	},
+	{
+		WL_IW_GET_LINK_SPEED,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"LINKSPEED"
+	},
+	{
+		WL_IW_GET_CURR_MACADDR,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"Macaddr"
+	},
+	{
+		WL_IW_SET_STOP,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"STOP"
+	},
+	{
+		WL_IW_SET_START,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"START"
+	},
+
+#ifdef SOFTAP
+	
+	
+	{
+		WL_SET_AP_CFG,
+		IW_PRIV_TYPE_CHAR |  256,      
+		0,
+		"AP_SET_CFG"
+	},
+
+	{
+		WL_AP_STA_LIST,
+		0,                     
+		IW_PRIV_TYPE_CHAR | 0, 
+		"AP_GET_STA_LIST"
+	},
+
+	{
+		WL_AP_MAC_FLTR,
+		IW_PRIV_TYPE_CHAR | 256,                      
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,    
+		"AP_SET_MAC_FLTR"
+	},
+
+	{ 
+		WL_AP_BSS_START,
+		0,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		"AP_BSS_START"
+	},
+
+	{
+		AP_LPB_CMD,
+		IW_PRIV_TYPE_CHAR | 256,   
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,    
+		"AP_LPB_CMD"
+	},
+
+	{ 
+		WL_AP_STOP,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,   
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,   
+		"AP_BSS_STOP"
+	},
+	{ 
+		WL_FW_RELOAD,
+		IW_PRIV_TYPE_CHAR | 256,
+		IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0,
+		"WL_FW_RELOAD"
+	},
+#endif 
+#if defined(CSCAN)
+	{ 
+		WL_COMBO_SCAN,
+		IW_PRIV_TYPE_CHAR | 1024,  
+		0,
+		"CSCAN"
+	},
+#endif 
+	};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+	.num_standard = ARRAYSIZE(wl_iw_handler),
+	.standard = (iw_handler *) wl_iw_handler,
+	.num_private = ARRAYSIZE(wl_iw_priv_handler),
+	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+	.private = (iw_handler *)wl_iw_priv_handler,
+	.private_args = (void *) wl_iw_priv_args,
+
+#if WIRELESS_EXT >= 19
+	.get_wireless_stats = dhd_get_wireless_stats,
+#endif 
+	};
+#endif 
+
+
+
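+/*
+ * Main Wireless Extensions ioctl entry point: bound-check the user buffer
+ * for the given command, copy it in, invoke the handler and copy results
+ * back to user space.
+ */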
+int
+wl_iw_ioctl(
+	struct net_device *dev,
+	struct ifreq *rq,
+	int cmd
+)
+{
+	struct iwreq *wrq = (struct iwreq *) rq;
+	struct iw_request_info info;
+	iw_handler handler;
+	char *extra = NULL;
+	size_t token_size = 1;
+	int max_tokens = 0, ret = 0;
+
+	net_os_wake_lock(dev);
+
+	WL_TRACE(("\n%s, cmd:%x called via dhd->do_ioctl()entry point\n", __FUNCTION__, cmd));
+	if (cmd < SIOCIWFIRST ||
+		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) {
+			WL_ERROR(("%s: error in cmd=%x : not supported\n", __FUNCTION__, cmd));
+			net_os_wake_unlock(dev);
+			return -EOPNOTSUPP;
+	}
+
+	switch (cmd) {
+
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		max_tokens = IW_ESSID_MAX_SIZE + 1;
+		break;
+
+	case SIOCSIWENCODE:
+	case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+	case SIOCSIWENCODEEXT:
+	case SIOCGIWENCODEEXT:
+#endif
+		max_tokens = wrq->u.data.length;
+		break;
+
+	case SIOCGIWRANGE:
+		
+		max_tokens = sizeof(struct iw_range) + 500;
+		break;
+
+	case SIOCGIWAPLIST:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_AP;
+		break;
+
+#if WIRELESS_EXT > 13
+	case SIOCGIWSCAN:
+#if defined(WL_IW_USE_ISCAN)
+	if (g_iscan)
+		max_tokens = wrq->u.data.length;
+	else
+#endif
+		max_tokens = IW_SCAN_MAX_DATA;
+		break;
+#endif 
+
+	case SIOCSIWSPY:
+		token_size = sizeof(struct sockaddr);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+	case SIOCGIWSPY:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+#if WIRELESS_EXT > 17
+	case SIOCSIWPMKSA:
+	case SIOCSIWGENIE:
+#endif 
+	case SIOCSIWPRIV:
+		max_tokens = wrq->u.data.length;
+		break;
+	}
+
+	if (max_tokens && wrq->u.data.pointer) {
+		if (wrq->u.data.length > max_tokens) {
+			WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d  > max_tokens=%d\n",
+				__FUNCTION__, cmd, wrq->u.data.length, max_tokens));
+			ret = -E2BIG;
+			goto wl_iw_ioctl_done;
+		}
+		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) {
+			ret = -ENOMEM;
+			goto wl_iw_ioctl_done;
+		}
+
+		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			ret = -EFAULT;
+			goto wl_iw_ioctl_done;
+		}
+	}
+
+	info.cmd = cmd;
+	info.flags = 0;
+
+	ret = handler(dev, &info, &wrq->u, extra);
+
+	if (extra) {
+		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			ret = -EFAULT;
+			goto wl_iw_ioctl_done;
+		}
+
+		kfree(extra);
+	}
+
+wl_iw_ioctl_done:
+
+	net_os_wake_unlock(dev);
+
+	return ret;
+}
+
+
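+/*
+ * Map a firmware connection event (event/status/reason) to a short
+ * "Conn"/"Sup" cause string; returns TRUE if a matching entry was found.
+ */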
+static bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen)
+{
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			
+		uint32 inStatus;		
+		uint32 inReason;		
+		const char* outName;	
+		const char* outCause;	
+	} conn_fail_event_map_t;
+
+	
+#define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map [] = {
+		
+		
+		{WLC_E_SET_SSID,     WLC_E_STATUS_SUCCESS,   WL_IW_DONT_CARE,
+		"Conn", "Success"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+		"Conn", "NoNetworks"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ConfigMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_PRUNE_ENCR_MISMATCH,
+		"Conn", "EncrypMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_RSN_MISMATCH,
+		"Conn", "RsnMismatch"},
+		{WLC_E_AUTH,         WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "AuthTimeout"},
+		{WLC_E_AUTH,         WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "AuthFail"},
+		{WLC_E_AUTH,         WLC_E_STATUS_NO_ACK,    WL_IW_DONT_CARE,
+		"Conn", "AuthNoAck"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ReassocFail"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "ReassocTimeout"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_ABORT,     WL_IW_DONT_CARE,
+		"Conn", "ReassocAbort"},
+		{WLC_E_PSK_SUP,      WLC_SUP_KEYED,          WL_IW_DONT_CARE,
+		"Sup", "ConnSuccess"},
+		{WLC_E_PSK_SUP,      WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Sup", "WpaHandshakeFail"},
+		{WLC_E_DEAUTH_IND,   WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Deauth"},
+		{WLC_E_DISASSOC_IND, WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "DisassocInd"},
+		{WLC_E_DISASSOC,     WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Disassoc"}
+	};
+
+	const char* name = "";
+	const char* cause = NULL;
+	int i;
+
+	
+	for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			name = row->outName;
+			cause = row->outCause;
+			break;
+		}
+	}
+
+	
+	if (cause) {
+		memset(stringBuf, 0, buflen);
+		snprintf(stringBuf, buflen, "%s %s %02d %02d",
+			name, cause, status, reason);
+		WL_INFORM(("Connection status: %s\n", stringBuf));
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
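+
+/*
+ * Illustrative sketch only (not driver code): the event_map table above
+ * turns an (event, status, reason) triple into a short "<name> <cause>
+ * <status> <reason>" string.  A caller with a scratch buffer would use it
+ * roughly like this; buf is hypothetical.
+ *
+ *	char buf[64];
+ *	if (wl_iw_conn_status_str(WLC_E_SET_SSID, WLC_E_STATUS_FAIL, 0,
+ *		buf, sizeof(buf)))
+ *		WL_INFORM(("%s\n", buf));	(prints e.g. "Conn ConfigMismatch ...")
+ */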
+
+#if WIRELESS_EXT > 14
+
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+	uint32 event = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+
+	return wl_iw_conn_status_str(event, status, reason, stringBuf, buflen);
+}
+#endif 
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 
+#endif 
+
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd = 0;
+	uint32 event_type = ntoh32(e->event_type);
+	uint16 flags =  ntoh16(e->flags);
+	uint32 datalen = ntoh32(e->datalen);
+	uint32 status =  ntoh32(e->status);
+	uint32 toto;
+	static  uint32 roam_no_success = 0;
+	static bool roam_no_success_send = FALSE;
+	memset(&wrqu, 0, sizeof(wrqu));
+	memset(extra, 0, sizeof(extra));
+
+	if (!dev) {
+		WL_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return;
+	}
+
+	net_os_wake_lock(dev);
+
+	WL_TRACE(("%s: dev=%s event=%d \n", __FUNCTION__, dev->name, event_type));
+
+	
+	switch (event_type) {
+#if defined(SOFTAP)
+	case WLC_E_PRUNE:
+		if (ap_cfg_running) {
+			char *macaddr = (char *)&e->addr;
+			WL_SOFTAP(("PRUNE received, %02X:%02X:%02X:%02X:%02X:%02X!\n",
+				macaddr[0], macaddr[1], macaddr[2], macaddr[3],
+				macaddr[4], macaddr[5]));
+
+			
+			if (ap_macmode)
+			{
+				int i;
+				for (i = 0; i < ap_black_list.count; i++) {
+					if (!bcmp(macaddr, &ap_black_list.ea[i],
+						sizeof(struct ether_addr))) {
+						WL_SOFTAP(("mac in black list, ignore it\n"));
+						break;
+					}
+				}
+
+				if (i == ap_black_list.count) {
+					
+					char mac_buf[32] = {0};
+					sprintf(mac_buf, "STA_BLOCK %02X:%02X:%02X:%02X:%02X:%02X",
+						macaddr[0], macaddr[1], macaddr[2],
+						macaddr[3], macaddr[4], macaddr[5]);
+					wl_iw_send_priv_event(priv_dev, mac_buf);
+				}
+			}
+		}
+		break;
+#endif 
+	case WLC_E_TXFAIL:
+		cmd = IWEVTXDROP;
+		memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		break;
+#if WIRELESS_EXT > 14
+	case WLC_E_JOIN:
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+#if defined(SOFTAP)
+		WL_SOFTAP(("STA connect received %d\n", event_type));
+		if (ap_cfg_running) {
+			wl_iw_send_priv_event(priv_dev, "STA_JOIN");
+			goto wl_iw_event_end;
+		}
+#endif 
+		memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		cmd = IWEVREGISTERED;
+		break;
+	case WLC_E_ROAM:
+		if (status != WLC_E_STATUS_SUCCESS) {
+			roam_no_success++;
+			if ((roam_no_success == 3) && (roam_no_success_send == FALSE)) {
+				
+				roam_no_success_send = TRUE;
+				bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+				bzero(&extra, ETHER_ADDR_LEN);
+				cmd = SIOCGIWAP;
+				WL_ERROR(("%s  ROAMING did not succeeded , send Link Down\n",
+					__FUNCTION__));
+			} else {
+				WL_TRACE(("##### ROAMING did not succeeded %d\n", roam_no_success));
+				goto wl_iw_event_end;
+			}
+		} else {
+			memcpy(wrqu.addr.sa_data, &e->addr.octet, ETHER_ADDR_LEN);
+			wrqu.addr.sa_family = ARPHRD_ETHER;
+			cmd = SIOCGIWAP;
+		}
+	break;
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+#if defined(SOFTAP)
+		WL_SOFTAP(("STA disconnect received %d\n", event_type));
+		if (ap_cfg_running) {
+			wl_iw_send_priv_event(priv_dev, "STA_LEAVE");
+			goto wl_iw_event_end;
+		}
+#endif 
+		cmd = SIOCGIWAP;
+		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		bzero(&extra, ETHER_ADDR_LEN);
+		break;
+	case WLC_E_LINK:
+	case WLC_E_NDIS_LINK:
+		cmd = SIOCGIWAP;
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			
+			
+#ifdef SOFTAP
+#ifdef AP_ONLY
+		if (ap_cfg_running) {
+#else
+		if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif	
+			
+			WL_SOFTAP(("AP DOWN %d\n", event_type));
+			wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+		} else {
+			WL_TRACE(("STA_Link Down\n"));
+			g_ss_cache_ctrl.m_link_down = 1;
+		}
+#else		
+		g_ss_cache_ctrl.m_link_down = 1;
+#endif 
+			WL_TRACE(("Link Down\n"));
+
+			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+			bzero(&extra, ETHER_ADDR_LEN);
+		}
+		else {
+			
+			memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+			g_ss_cache_ctrl.m_link_down = 0;
+			
+			memcpy(g_ss_cache_ctrl.m_active_bssid, &e->addr, ETHER_ADDR_LEN);
+#ifdef SOFTAP
+
+#ifdef AP_ONLY
+			if (ap_cfg_running) {
+#else
+			if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) {
+#endif
+			
+				WL_SOFTAP(("AP UP %d\n", event_type));
+				wl_iw_send_priv_event(priv_dev, "AP_UP");
+			} else {
+				WL_TRACE(("STA_LINK_UP\n"));
+				roam_no_success_send = FALSE;
+				roam_no_success = 0;
+			}
+#else
+#endif 
+			WL_TRACE(("Link UP\n"));
+
+		}
+		net_os_wake_lock_timeout_enable(dev);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		break;
+	case WLC_E_ACTION_FRAME:
+		cmd = IWEVCUSTOM;
+		if (datalen + 1 <= sizeof(extra)) {
+			wrqu.data.length = datalen + 1;
+			extra[0] = WLC_E_ACTION_FRAME;
+			memcpy(&extra[1], data, datalen);
+			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+		}
+		break;
+
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		cmd = IWEVCUSTOM;
+		memcpy(&toto, data, 4);
+		if (sizeof(status) + 1 <= sizeof(extra)) {
+			wrqu.data.length = sizeof(status) + 1;
+			extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+			memcpy(&extra[1], &status, sizeof(status));
+			printf("wl_iw_event status %d PacketId %d \n", status, toto);
+			printf("WLC_E_ACTION_FRAME_COMPLETE len %d \n", wrqu.data.length);
+		}
+		break;
+#endif 
+#if WIRELESS_EXT > 17
+	case WLC_E_MIC_ERROR: {
+		struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra;
+		cmd = IWEVMICHAELMICFAILURE;
+		wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+		if (flags & WLC_EVENT_MSG_GROUP)
+			micerrevt->flags |= IW_MICFAILURE_GROUP;
+		else
+			micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+		memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+		break;
+	}
+	case WLC_E_PMKID_CACHE: {
+		if (data)
+		{
+			struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+			pmkid_cand_list_t *pmkcandlist;
+			pmkid_cand_t	*pmkidcand;
+			int count;
+
+			cmd = IWEVPMKIDCAND;
+			pmkcandlist = data;
+			count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+			ASSERT(count >= 0);
+			wrqu.data.length = sizeof(struct iw_pmkid_cand);
+			pmkidcand = pmkcandlist->pmkid_cand;
+			while (count) {
+				bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+				if (pmkidcand->preauth)
+					iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+				bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+					ETHER_ADDR_LEN);
+#ifndef SANDGATE2G
+				wireless_send_event(dev, cmd, &wrqu, extra);
+#endif
+				pmkidcand++;
+				count--;
+			}
+		}
+		goto wl_iw_event_end;
+	}
+#endif 
+
+	case WLC_E_SCAN_COMPLETE:
+#if defined(WL_IW_USE_ISCAN)
+		if ((g_iscan) && (g_iscan->tsk_ctl.thr_pid >= 0) &&
+			(g_iscan->iscan_state != ISCAN_STATE_IDLE))
+		{
+			up(&g_iscan->tsk_ctl.sema);
+		} else {
+			cmd = SIOCGIWSCAN;
+			wrqu.data.length = strlen(extra);
+			WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific scan %d\n",
+				g_iscan->iscan_state));
+		}
+#else
+		cmd = SIOCGIWSCAN;
+		wrqu.data.length = strlen(extra);
+		WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n"));
+#endif 
+	break;
+
+	
+	case WLC_E_PFN_NET_FOUND:
+	{
+		wl_pfn_net_info_t *netinfo;
+		netinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t) -
+		            sizeof(wl_pfn_net_info_t));
+		WL_ERROR(("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n",
+		   __FUNCTION__, PNO_EVENT_UP, netinfo->pfnsubnet.SSID,
+		   netinfo->pfnsubnet.SSID_len));
+		net_os_wake_lock_timeout_enable(dev);
+		cmd = IWEVCUSTOM;
+		memset(&wrqu, 0, sizeof(wrqu));
+		strcpy(extra, PNO_EVENT_UP);
+		wrqu.data.length = strlen(extra);
+	}
+	break;
+
+	default:
+		
+		WL_TRACE(("Unknown Event %d: ignoring\n", event_type));
+		break;
+	}
+#ifndef SANDGATE2G
+		if (cmd) {
+			if (cmd == SIOCGIWSCAN)
+				wireless_send_event(dev, cmd, &wrqu, NULL);
+			else
+				wireless_send_event(dev, cmd, &wrqu, extra);
+		}
+#endif
+
+#if WIRELESS_EXT > 14
+	
+	memset(extra, 0, sizeof(extra));
+	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+		cmd = IWEVCUSTOM;
+		wrqu.data.length = strlen(extra);
+#ifndef SANDGATE2G
+		wireless_send_event(dev, cmd, &wrqu, extra);
+#endif
+	}
+#endif 
+
+	goto wl_iw_event_end;	
+wl_iw_event_end:
+
+	net_os_wake_unlock(dev);
+#endif 
+}
+
+int
+wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+	int res = 0;
+	wl_cnt_t cnt;
+	int phy_noise;
+	int rssi;
+	scb_val_t scb_val;
+
+	phy_noise = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+		goto done;
+
+	phy_noise = dtoh32(phy_noise);
+	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise));
+
+	bzero(&scb_val, sizeof(scb_val_t));
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+		goto done;
+
+	rssi = dtoh32(scb_val.val);
+	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi));
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		wstats->qual.qual = 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		wstats->qual.qual = 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		wstats->qual.qual = 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		wstats->qual.qual = 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		wstats->qual.qual = 4;
+	else
+		wstats->qual.qual = 5;
+
+	
+	wstats->qual.level = 0x100 + rssi;
+	wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+	wstats->qual.updated |= 7;
+#endif 
+
+#if WIRELESS_EXT > 11
+	WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n", (int)sizeof(wl_cnt_t)));
+
+	memset(&cnt, 0, sizeof(wl_cnt_t));
+	res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+	if (res)
+	{
+		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n", res));
+		goto done;
+	}
+
+	cnt.version = dtoh16(cnt.version);
+	if (cnt.version != WL_CNT_T_VERSION) {
+		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+			WL_CNT_T_VERSION, cnt.version));
+		goto done;
+	}
+
+	wstats->discard.nwid = 0;
+	wstats->discard.code = dtoh32(cnt.rxundec);
+	wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+	wstats->discard.retries = dtoh32(cnt.txfail);
+	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+	wstats->miss.beacon = 0;
+
+	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+		dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif 
+
+done:
+	return res;
+}
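+
+/*
+ * Worked summary of the RSSI-to-quality mapping above; the thresholds are
+ * the WL_IW_RSSI_* values from wl_iw.h:
+ *
+ *	rssi <= WL_IW_RSSI_NO_SIGNAL (-91)  -> qual 0
+ *	rssi <= WL_IW_RSSI_VERY_LOW  (-80)  -> qual 1
+ *	rssi <= WL_IW_RSSI_LOW       (-70)  -> qual 2
+ *	rssi <= WL_IW_RSSI_GOOD      (-68)  -> qual 3
+ *	rssi <= WL_IW_RSSI_VERY_GOOD (-58)  -> qual 4
+ *	otherwise                           -> qual 5
+ *
+ * level and noise are stored as 0x100 + dBm, so the low byte carries the
+ * two's-complement dBm value that user space decodes when IW_QUAL_DBM is set.
+ */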
+#if defined(COEX_DHCP)
+static void
+wl_iw_bt_flag_set(
+	struct net_device *dev,
+	bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+	char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+	char buf_flag7_default[8]   = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_lock();
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+	
+	set_btc_esco_params(dev, set);
+#endif
+
+
+#if defined(BT_DHCP_USE_FLAGS)
+	WL_TRACE_COEX(("WI-FI priority boost via bt flags, set:%d\n", set));
+	if (set == TRUE) {
+		
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_dhcp_on[0], sizeof(buf_flag7_dhcp_on));
+	}
+	else  {
+		
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+	}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	rtnl_unlock();
+#endif
+}
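+
+/*
+ * Usage sketch (mirrors the _bt_dhcp_sysioc_thread() state machine below,
+ * not additional driver code): the coex flag is raised for the duration of
+ * a DHCP exchange and always cleared again afterwards.
+ *
+ *	wl_iw_bt_flag_set(dev, TRUE);	boost Wi-Fi over BT for DHCP
+ *	... DHCP completes, or BT_DHCP_FLAG_FORCE_TIME expires ...
+ *	wl_iw_bt_flag_set(dev, FALSE);	restore default btc_flags
+ */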
+
+static void
+wl_iw_bt_timerfunc(ulong data)
+{
+	bt_info_t  *bt_local = (bt_info_t *)data;
+	bt_local->timer_on = 0;
+	WL_TRACE(("%s\n", __FUNCTION__));
+	
+	up(&bt_local->tsk_ctl.sema);
+}
+
+static int
+_bt_dhcp_sysioc_thread(void *data)
+{
+	tsk_ctl_t *tsk_ctl =  (tsk_ctl_t *)data;
+
+	DAEMONIZE("dhcp_sysioc");
+
+	complete(&tsk_ctl->completed);
+
+	while (down_interruptible(&tsk_ctl->sema) == 0) {
+
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk_ctl->terminated) {
+			break;
+		}
+
+		if (g_bt->timer_on) {
+			g_bt->timer_on = 0;
+			del_timer_sync(&g_bt->timer);
+		}
+
+		switch (g_bt->bt_state) {
+			case BT_DHCP_START:
+				
+				WL_TRACE_COEX(("%s bt_dhcp stm: started \n", __FUNCTION__));
+				g_bt->bt_state = BT_DHCP_OPPORTUNITY_WINDOW;
+				mod_timer(&g_bt->timer,
+				          jiffies + BT_DHCP_OPPORTUNITY_WINDOW_TIME*HZ/1000);
+				g_bt->timer_on = 1;
+				break;
+
+			case BT_DHCP_OPPORTUNITY_WINDOW:
+				if (g_bt->dhcp_done) {
+					WL_TRACE_COEX(("%s DHCP Done before T1 expiration\n",
+						__FUNCTION__));
+					goto btc_coex_idle;
+				}
+
+				
+				WL_TRACE_COEX(("%s DHCP T1:%d expired\n",
+					__FUNCTION__, BT_DHCP_OPPORTUNITY_WINDOW_TIME));
+				
+				if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, TRUE);
+				g_bt->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+				mod_timer(&g_bt->timer, jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000);
+				g_bt->timer_on = 1;
+				break;
+
+			case BT_DHCP_FLAG_FORCE_TIMEOUT:
+				if (g_bt->dhcp_done) {
+					WL_TRACE_COEX(("%s DHCP Done before T2 expiration\n",
+						__FUNCTION__));
+				} else  {
+					
+					WL_TRACE_COEX(("%s DHCP wait interval T2:%d msec expired\n",
+						__FUNCTION__, BT_DHCP_FLAG_FORCE_TIME));
+				}
+
+				
+				if (g_bt->dev)  wl_iw_bt_flag_set(g_bt->dev, FALSE);
+			btc_coex_idle:
+				g_bt->bt_state = BT_DHCP_IDLE;
+				g_bt->timer_on = 0;
+				break;
+
+			default:
+				WL_ERROR(("%s error g_status=%d !!!\n", __FUNCTION__,
+				          g_bt->bt_state));
+				if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE);
+				g_bt->bt_state = BT_DHCP_IDLE;
+				g_bt->timer_on = 0;
+				break;
+		 }
+
+		net_os_wake_unlock(g_bt->dev);
+	}
+
+	if (g_bt->timer_on) {
+		g_bt->timer_on = 0;
+		del_timer_sync(&g_bt->timer);
+	}
+	complete_and_exit(&tsk_ctl->completed, 0);
+}
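+
+/*
+ * State flow of the thread above, summarised from the switch statement
+ * (timer values are the BT_DHCP_* constants used in the code):
+ *
+ *	BT_DHCP_START
+ *	  -> BT_DHCP_OPPORTUNITY_WINDOW	(timer = BT_DHCP_OPPORTUNITY_WINDOW_TIME)
+ *	     DHCP done early -> BT_DHCP_IDLE
+ *	  -> BT_DHCP_FLAG_FORCE_TIMEOUT	(flag set, timer = BT_DHCP_FLAG_FORCE_TIME)
+ *	     DHCP done or timer expired -> flag cleared -> BT_DHCP_IDLE
+ */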
+
+static void
+wl_iw_bt_release(void)
+{
+	bt_info_t *bt_local = g_bt;
+
+	if (!bt_local) {
+		return;
+	}
+
+	if (bt_local->tsk_ctl.thr_pid >= 0) {
+		PROC_STOP(&bt_local->tsk_ctl);
+	}
+	kfree(bt_local);
+	g_bt = NULL;
+}
+
+static int
+wl_iw_bt_init(struct net_device *dev)
+{
+	bt_info_t *bt_dhcp = NULL;
+
+	bt_dhcp = kmalloc(sizeof(bt_info_t), GFP_KERNEL);
+	if (!bt_dhcp)
+		return -ENOMEM;
+
+	memset(bt_dhcp, 0, sizeof(bt_info_t));
+
+	g_bt = bt_dhcp;
+	bt_dhcp->dev = dev;
+	bt_dhcp->bt_state = BT_DHCP_IDLE;
+
+	
+	bt_dhcp->timer_ms    = 10;
+	init_timer(&bt_dhcp->timer);
+	bt_dhcp->timer.data = (ulong)bt_dhcp;
+	bt_dhcp->timer.function = wl_iw_bt_timerfunc;
+	bt_dhcp->ts_dhcp_start = 0;
+	bt_dhcp->ts_dhcp_ok = 0;
+
+	PROC_START(_bt_dhcp_sysioc_thread, bt_dhcp, &bt_dhcp->tsk_ctl, 0);
+	if (bt_dhcp->tsk_ctl.thr_pid < 0) {
+		WL_ERROR(("Failed in %s\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+#endif 
+
+int
+wl_iw_attach(struct net_device *dev, void * dhdp)
+{
+#if defined(WL_IW_USE_ISCAN)
+	int params_size = 0;
+#endif 
+	wl_iw_t *iw;
+#if defined(WL_IW_USE_ISCAN)
+	iscan_info_t *iscan = NULL;
+#endif
+
+	DHD_OS_MUTEX_INIT(&wl_cache_lock);
+	DHD_OS_MUTEX_INIT(&wl_softap_lock);
+
+#if defined(WL_IW_USE_ISCAN)
+	if (!dev)
+		return 0;
+
+	
+	memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t));
+
+	
+#ifdef CSCAN
+	params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)) +
+	    (WL_NUMCHANNELS * sizeof(uint16)) + WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
+#else
+	params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+#endif 
+	iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+	if (!iscan)
+		return -ENOMEM;
+	memset(iscan, 0, sizeof(iscan_info_t));
+
+	
+	iscan->iscan_ex_params_p = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+	if (!iscan->iscan_ex_params_p) {
+		kfree(iscan);
+		return -ENOMEM;
+	}
+	iscan->iscan_ex_param_size = params_size;
+
+	
+	g_iscan = iscan;
+	iscan->dev = dev;
+	iscan->iscan_state = ISCAN_STATE_IDLE;
+
+#if defined(CONFIG_FIRST_SCAN)
+	g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE;
+	g_first_counter_scans = 0;
+	g_iscan->scan_flag = 0;
+#endif 
+
+	
+	iscan->timer_ms    = 8000;
+	init_timer(&iscan->timer);
+	iscan->timer.data = (ulong)iscan;
+	iscan->timer.function = wl_iw_timerfunc;
+
+	PROC_START(_iscan_sysioc_thread, iscan, &iscan->tsk_ctl, 0);
+	if (iscan->tsk_ctl.thr_pid < 0)
+		return -ENOMEM;
+#endif 
+
+	iw = *(wl_iw_t **)netdev_priv(dev);
+	iw->pub = (dhd_pub_t *)dhdp;
+#ifdef SOFTAP
+	priv_dev = dev;
+#endif 
+	g_scan = NULL;
+
+	
+	g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
+	if (!g_scan)
+		return -ENOMEM;
+
+	memset(g_scan, 0, G_SCAN_RESULTS);
+	g_scan_specified_ssid = 0;
+
+#if !defined(CSCAN)
+	
+	wl_iw_init_ss_cache_ctrl();
+#endif 
+#ifdef COEX_DHCP
+	
+	wl_iw_bt_init(dev);
+#endif 
+
+
+	return 0;
+}
+
+void
+wl_iw_detach(void)
+{
+#if defined(WL_IW_USE_ISCAN)
+	iscan_buf_t  *buf;
+	iscan_info_t *iscan = g_iscan;
+
+	if (!iscan)
+		return;
+	if (iscan->tsk_ctl.thr_pid >= 0) {
+		PROC_STOP(&iscan->tsk_ctl);
+	}
+	DHD_OS_MUTEX_LOCK(&wl_cache_lock);
+	while (iscan->list_hdr) {
+		buf = iscan->list_hdr->next;
+		kfree(iscan->list_hdr);
+		iscan->list_hdr = buf;
+	}
+	kfree(iscan->iscan_ex_params_p);
+	kfree(iscan);
+	g_iscan = NULL;
+	DHD_OS_MUTEX_UNLOCK(&wl_cache_lock);
+#endif 
+
+	if (g_scan)
+		kfree(g_scan);
+
+	g_scan = NULL;
+#if !defined(CSCAN)
+	wl_iw_release_ss_cache_ctrl();
+#endif 
+#ifdef COEX_DHCP
+	wl_iw_bt_release();
+#endif 
+
+#ifdef SOFTAP
+	if (ap_cfg_running) {
+		WL_TRACE(("\n%s AP is going down\n", __FUNCTION__));
+		
+		wl_iw_send_priv_event(priv_dev, "AP_DOWN");
+	}
+#endif
+
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h
new file mode 100644
index 0000000..a34472f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.h
@@ -0,0 +1,300 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h,v 1.15.80.6 2010-12-23 01:13:23 Exp $
+ */
+
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+#define GET_SSID			"SSID="
+#define GET_CHANNEL			"CH="
+#define GET_NPROBE 			"NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL  	"ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL  	"PASSIVE="
+#define GET_HOME_DWELL  		"HOME="
+#define GET_SCAN_TYPE			"TYPE="
+
+#define BAND_GET_CMD				"GETBAND"
+#define BAND_SET_CMD				"SETBAND"
+#define DTIM_SKIP_GET_CMD			"DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD			"DTIMSKIPSET"
+#define SETSUSPEND_CMD				"SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD			"PNOSSIDCLR"
+
+#define PNOSETUP_SET_CMD			"PNOSETUP " 
+#define PNOENABLE_SET_CMD			"PNOFORCE"
+#define PNODEBUG_SET_CMD			"PNODEBUG"
+#define TXPOWER_SET_CMD			"TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+
+typedef struct wl_iw_extra_params {
+	int 	target_channel; 
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];	
+	char custom_locale[WLC_CNTRY_BUF_SZ];	
+	int32 custom_locale_rev;		
+};
+
+
+#define	WL_IW_RSSI_MINVAL		-200	
+#define	WL_IW_RSSI_NO_SIGNAL	-91	
+#define	WL_IW_RSSI_VERY_LOW	-80	
+#define	WL_IW_RSSI_LOW		-70	
+#define	WL_IW_RSSI_GOOD		-68	
+#define	WL_IW_RSSI_VERY_GOOD	-58	
+#define	WL_IW_RSSI_EXCELLENT	-57	
+#define	WL_IW_RSSI_INVALID	 0	
+#define MAX_WX_STRING 80
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN	(SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI			(SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN	(SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED	(SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR	(SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP				(SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START			(SIOCIWFIRSTPRIV+13)
+
+
+#define WL_SET_AP_CFG           (SIOCIWFIRSTPRIV+15)
+#define WL_AP_STA_LIST          (SIOCIWFIRSTPRIV+17)
+#define WL_AP_MAC_FLTR	        (SIOCIWFIRSTPRIV+19)
+#define WL_AP_BSS_START         (SIOCIWFIRSTPRIV+21)
+#define AP_LPB_CMD              (SIOCIWFIRSTPRIV+23)
+#define WL_AP_STOP              (SIOCIWFIRSTPRIV+25)
+#define WL_FW_RELOAD            (SIOCIWFIRSTPRIV+27)
+#define WL_AP_STA_DISASSOC		(SIOCIWFIRSTPRIV+29)
+#define WL_COMBO_SCAN           (SIOCIWFIRSTPRIV+31)
+
+
+#define G_SCAN_RESULTS		(8 * 1024)
+#define WE_ADD_EVENT_FIX	0x80
+#define G_WLAN_SET_ON		0
+#define G_WLAN_SET_OFF		1
+
+#define CHECK_EXTRA_FOR_NULL(extra) \
+if (!extra) { \
+	WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \
+	return -EINVAL; \
+}
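+
+/*
+ * Typical use (sketch): invoked at the top of a wireless-extensions handler
+ * before the "extra" buffer supplied by the WEXT core is dereferenced.
+ * wl_iw_set_something is a made-up name; the real handlers live in wl_iw.c.
+ *
+ *	static int
+ *	wl_iw_set_something(struct net_device *dev, struct iw_request_info *info,
+ *		union iwreq_data *wrqu, char *extra)
+ *	{
+ *		CHECK_EXTRA_FOR_NULL(extra);
+ *		...
+ *	}
+ */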
+
+typedef struct wl_iw {
+	char nickname[IW_ESSID_MAX_SIZE];
+
+	struct iw_statistics wstats;
+
+	int spy_num;
+	int wpaversion;			
+	int pcipher;			
+	int gcipher;			
+	int privacy_invoked; 		
+
+	struct ether_addr spy_addr[IW_MAX_SPY];
+	struct iw_quality spy_qual[IW_MAX_SPY];
+	void  *wlinfo;
+	dhd_pub_t * pub;
+} wl_iw_t;
+
+int	 wl_control_wl_start(struct net_device *dev);
+#define WLC_IW_SS_CACHE_MAXLEN				2048
+#define WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN	32
+#define WLC_IW_BSS_INFO_MAXLEN 				\
+	(WLC_IW_SS_CACHE_MAXLEN - WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN)
+
+typedef struct wl_iw_ss_cache {
+	struct wl_iw_ss_cache *next;
+	int dirty;
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_iw_ss_cache_t;
+
+typedef struct wl_iw_ss_cache_ctrl {
+	wl_iw_ss_cache_t *m_cache_head;	
+	int m_link_down;		
+	int m_timer_expired;		
+	char m_active_bssid[ETHER_ADDR_LEN];	
+	uint m_prev_scan_mode;	
+	uint m_cons_br_scan_cnt;	
+	struct timer_list *m_timer;	
+} wl_iw_ss_cache_ctrl_t;
+
+typedef enum broadcast_first_scan {
+	BROADCAST_SCAN_FIRST_IDLE = 0,
+	BROADCAST_SCAN_FIRST_STARTED,
+	BROADCAST_SCAN_FIRST_RESULT_READY,
+	BROADCAST_SCAN_FIRST_RESULT_CONSUMED
+} broadcast_first_scan_t;
+#ifdef SOFTAP
+#define SSID_LEN	33
+#define SEC_LEN		16
+#define KEY_LEN		65
+#define PROFILE_OFFSET	32
+struct ap_profile {
+	uint8	ssid[SSID_LEN];
+	uint8	sec[SEC_LEN];
+	uint8	key[KEY_LEN];
+	uint32	channel; 
+	uint32	preamble;
+	uint32	max_scb;	
+	uint32  closednet;  
+	char country_code[WLC_CNTRY_BUF_SZ];
+};
+
+
+#define MACLIST_MODE_DISABLED	0
+#define MACLIST_MODE_DENY		1
+#define MACLIST_MODE_ALLOW		2
+struct mflist {
+	uint count;
+	struct ether_addr ea[16];
+};
+struct mac_list_set {
+	uint32	mode;
+	struct mflist mac_list;
+};
+#endif   
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif 
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+void wl_iw_detach(void);
+
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_send_hang_message(struct net_device *dev);
+extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
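+
+/*
+ * Usage sketch for the wrappers above, e.g. when translating one scan result
+ * into the WEXT event stream.  "event" and "end" point into the caller's
+ * output buffer and "bi" is a hypothetical wl_bss_info_t pointer.
+ *
+ *	struct iw_event iwe;
+ *
+ *	iwe.cmd = SIOCGIWAP;
+ *	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ *	memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ *	event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+ */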
+
+void	dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec);
+
+#define PNO_TLV_PREFIX			'S'
+#define PNO_TLV_VERSION			'1'
+#define PNO_TLV_SUBVERSION 		'2'
+#define PNO_TLV_RESERVED		'0'
+#define PNO_TLV_TYPE_SSID_IE		'S'
+#define PNO_TLV_TYPE_TIME		'T'
+#define PNO_TLV_FREQ_REPEAT		'R'
+#define PNO_TLV_FREQ_EXPO_MAX	'M'
+#define  PNO_EVENT_UP			"PNO_EVENT"
+
+typedef struct cmd_tlv {
+	char prefix;
+	char version;
+	char subver;
+	char reserved;
+} cmd_tlv_t;
+
+
+
+
+typedef struct cscan_tlv {
+	char prefix;
+	char version;
+	char subver;
+	char reserved;
+} cscan_tlv_t;
+
+#define CSCAN_COMMAND				"CSCAN "
+#define CSCAN_TLV_PREFIX 			'S'
+#define CSCAN_TLV_VERSION			1
+#define CSCAN_TLV_SUBVERSION			0
+#define CSCAN_TLV_TYPE_SSID_IE          'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE   'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE     'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE      'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE    'P'
+#define CSCAN_TLV_TYPE_HOME_IE         'H'
+#define CSCAN_TLV_TYPE_STYPE_IE        'T'
+
+#ifdef SOFTAP_TLV_CFG
+
+#define SOFTAP_SET_CMD				"SOFTAPSET "
+#define SOFTAP_TLV_PREFIX			'A'
+#define SOFTAP_TLV_VERSION			'1'
+#define SOFTAP_TLV_SUBVERSION		'0'
+#define SOFTAP_TLV_RESERVED		'0'
+
+#define TLV_TYPE_SSID				'S'
+#define TLV_TYPE_SECUR				'E'
+#define TLV_TYPE_KEY				'K'
+#define TLV_TYPE_CHANNEL			'C'
+#endif 
+
+extern int wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+	int channel_num, int *bytes_left);
+
+extern int wl_iw_parse_data_tlv(char** list_str, void  *dst, int dst_size,
+                                const char token, int input_size, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+                                     int max, int *bytes_left);
+
+extern int wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max);
+
+extern int wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num);
+
+
+#define NETDEV_PRIV(dev)	(*(wl_iw_t **)netdev_priv(dev))
+
+#endif 
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c
new file mode 100644
index 0000000..3c9f658
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -0,0 +1,310 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+	struct ifreq ifr;
+	struct wl_ioctl ioc;
+	mm_segment_t fs;
+	s32 err = 0;
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+	strcpy(ifr.ifr_name, dev->name);
+	ifr.ifr_data = (caddr_t)&ioc;
+
+	fs = get_fs();
+	set_fs(get_ds());
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	err = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+	err = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+	set_fs(fs);
+
+	return err;
+}
+
+/* Format an iovar buffer (not bsscfg indexed). The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p.
+ */
+static s32 wldev_mkiovar(
+	s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, u32 buflen)
+{
+	s32 iolen = 0;
+
+	iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+	return iolen;
+}
+
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen)
+{
+	s32 ret = 0;
+	s32 iovar_len = 0;
+
+	iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, iovar_len, FALSE);
+	return ret;
+}
+
+
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+
+	iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	return ret;
+}
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf));
+}
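+
+/*
+ * Example (sketch): setting an integer firmware iovar.  "mpc" is used purely
+ * as an illustration; whether a given iovar exists depends on the firmware.
+ *
+ *	int err = wldev_iovar_setint(dev, "mpc", 1);
+ *	if (err)
+ *		DHD_ERROR(("mpc set failed %d\n", err));
+ */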
+
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf));
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ *  taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ *  wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+
+	if (bssidx == 0) {
+		return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
+			(s8 *) iovar_buf, buflen);
+	}
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
+	iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+	if (buflen < 0 || iolen > (u32)buflen)
+	{
+		DHD_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+		return BCME_BUFTOOSHORT;
+	}
+
+	p = (s8 *)iovar_buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, iovar_name, namelen);
+	p += namelen;
+
+	/* bss config index as first param */
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(u32));
+	p += sizeof(u32);
+
+	/* parameter buffer follows */
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	return iolen;
+
+}
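+
+/*
+ * Worked example of the layout built above for a hypothetical call
+ * wldev_mkiovar_bsscfg("roam_off", (s8 *)&val, 4, buf, buflen, 1):
+ *
+ *	"bsscfg:"	 7 bytes, prefix without terminator
+ *	"roam_off\0"	 9 bytes, iovar name including NUL
+ *	htod32(1)	 4 bytes, bss config index
+ *	val		 4 bytes, parameter
+ *
+ * giving iolen = 24.  With bssidx == 0 the plain wldev_mkiovar() path is
+ * taken instead and no prefix is added.
+ */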
+
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx)
+{
+	s32 ret = 0;
+	s32 iovar_len = 0;
+
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, iovar_len, FALSE);
+	return ret;
+
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), bssidx);
+}
+
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), bssidx);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+int wldev_get_link_speed(
+	struct net_device *dev, int *plink_speed)
+{
+	int error;
+
+	if (!plink_speed)
+		return -EINVAL;
+	error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
+	if (unlikely(error))
+		return error;
+
+	/* Convert from internal units of 500 kbps to kbps */
+	*plink_speed *= 500;
+	return error;
+}
+
+int wldev_get_rssi(
+	struct net_device *dev, int *prssi)
+{
+	scb_val_t scb_val;
+	int error;
+
+	if (!prssi)
+		return -EINVAL;
+	bzero(&scb_val, sizeof(scb_val_t));
+
+	error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+	if (unlikely(error))
+		return error;
+
+	*prssi = dtoh32(scb_val.val);
+	return error;
+}
+
+int wldev_get_ssid(
+	struct net_device *dev, wlc_ssid_t *pssid)
+{
+	int error;
+
+	if (!pssid)
+		return -EINVAL;
+	error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
+	if (unlikely(error))
+		return error;
+	pssid->SSID_len = dtoh32(pssid->SSID_len);
+	return error;
+}
+
+int wldev_get_band(
+	struct net_device *dev, uint *pband)
+{
+	int error;
+
+	error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
+	return error;
+}
+
+int wldev_set_band(
+	struct net_device *dev, uint band)
+{
+	int error = -1;
+
+	if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+		error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), 1);
+	}
+	return error;
+}
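+
+/*
+ * Example (sketch) of the getters above from a caller that already holds a
+ * valid, up net_device; error handling is reduced to a single check.
+ *
+ *	int rssi = 0, speed = 0;
+ *
+ *	if (wldev_get_rssi(dev, &rssi) == 0 &&
+ *	    wldev_get_link_speed(dev, &speed) == 0)
+ *		printk(KERN_DEBUG "rssi %d dBm, link %d kbps\n", rssi, speed);
+ */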
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h
new file mode 100644
index 0000000..70249f4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -0,0 +1,97 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ * 
+ *         Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.h,v 1.1.4.1.2.14 2011-02-09 01:40:07 Exp $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/** wldev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or
+ *  netdev_ops->ndo_do_ioctl in new kernels)
+ *  @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
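+
+/* Example (sketch), mirroring wldev_get_band() in wldev_common.c:
+ *
+ *	uint band;
+ *	s32 err = wldev_ioctl(dev, WLC_GET_BAND, &band, sizeof(band), 0);
+ */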
+
+/** Retrieve named IOVARs; this function calls wldev_ioctl() with the
+ *  WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen);
+
+/** Set named IOVARs; this function calls wldev_ioctl() with the
+ *  WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen);
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following functions can be used when there is a need for bsscfg
+ *  indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg indexed IOVARs; this function calls wldev_ioctl()
+ *  with the WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx);
+
+/** Set named and bsscfg indexed IOVARs; this function calls wldev_ioctl()
+ *  with the WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx);
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+/* Get the link speed from the dongle; speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, int *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 69f8aa3..81b7201 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <linux/notifier.h>
 #include <linux/power_supply.h>
 #include <linux/pda_power.h>
 #include <linux/regulator/consumer.h>
@@ -38,9 +39,8 @@
 static struct timer_list polling_timer;
 static int polling;
 
-#ifdef CONFIG_USB_OTG_UTILS
 static struct otg_transceiver *transceiver;
-#endif
+static struct notifier_block otg_nb;
 static struct regulator *ac_draw;
 
 enum {
@@ -222,7 +222,42 @@
 #ifdef CONFIG_USB_OTG_UTILS
 static int otg_is_usb_online(void)
 {
-	return (transceiver->state == OTG_STATE_B_PERIPHERAL);
+	return (transceiver->last_event == USB_EVENT_VBUS ||
+		transceiver->last_event == USB_EVENT_ENUMERATED);
+}
+
+static int otg_is_ac_online(void)
+{
+	return (transceiver->last_event == USB_EVENT_CHARGER);
+}
+
+static int otg_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *unused)
+{
+	switch (event) {
+	case USB_EVENT_CHARGER:
+		ac_status = PDA_PSY_TO_CHANGE;
+		break;
+	case USB_EVENT_VBUS:
+	case USB_EVENT_ENUMERATED:
+		usb_status = PDA_PSY_TO_CHANGE;
+		break;
+	case USB_EVENT_NONE:
+		ac_status = PDA_PSY_TO_CHANGE;
+		usb_status = PDA_PSY_TO_CHANGE;
+		break;
+	default:
+		return NOTIFY_OK;
+	}
+
+	/*
+	 * Wait a bit before reading ac/usb line status and setting charger,
+	 * because ac/usb status readings may lag from irq.
+	 */
+	mod_timer(&charger_timer,
+		  jiffies + msecs_to_jiffies(pdata->wait_for_status));
+
+	return NOTIFY_OK;
 }
 #endif
 
@@ -282,6 +317,14 @@
 		ret = PTR_ERR(ac_draw);
 	}
 
+	transceiver = otg_get_transceiver();
+	if (transceiver && !pdata->is_usb_online) {
+		pdata->is_usb_online = otg_is_usb_online;
+	}
+	if (transceiver && !pdata->is_ac_online) {
+		pdata->is_ac_online = otg_is_ac_online;
+	}
+
 	if (pdata->is_ac_online) {
 		ret = power_supply_register(&pdev->dev, &pda_psy_ac);
 		if (ret) {
@@ -303,13 +346,6 @@
 		}
 	}
 
-#ifdef CONFIG_USB_OTG_UTILS
-	transceiver = otg_get_transceiver();
-	if (transceiver && !pdata->is_usb_online) {
-		pdata->is_usb_online = otg_is_usb_online;
-	}
-#endif
-
 	if (pdata->is_usb_online) {
 		ret = power_supply_register(&pdev->dev, &pda_psy_usb);
 		if (ret) {
@@ -331,6 +367,16 @@
 		}
 	}
 
+	if (transceiver && pdata->use_otg_notifier) {
+		otg_nb.notifier_call = otg_handle_notification;
+		ret = otg_register_notifier(transceiver, &otg_nb);
+		if (ret) {
+			dev_err(dev, "failure to register otg notifier\n");
+			goto otg_reg_notifier_failed;
+		}
+		polling = 0;
+	}
+
 	if (polling) {
 		dev_dbg(dev, "will poll for status\n");
 		setup_timer(&polling_timer, polling_timer_func, 0);
@@ -343,16 +389,17 @@
 
 	return 0;
 
+otg_reg_notifier_failed:
+	if (pdata->is_usb_online && usb_irq)
+		free_irq(usb_irq->start, &pda_psy_usb);
 usb_irq_failed:
 	if (pdata->is_usb_online)
 		power_supply_unregister(&pda_psy_usb);
 usb_supply_failed:
 	if (pdata->is_ac_online && ac_irq)
 		free_irq(ac_irq->start, &pda_psy_ac);
-#ifdef CONFIG_USB_OTG_UTILS
 	if (transceiver)
 		otg_put_transceiver(transceiver);
-#endif
 ac_irq_failed:
 	if (pdata->is_ac_online)
 		power_supply_unregister(&pda_psy_ac);
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 329b46b..03810ce 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -41,23 +41,40 @@
 
 static void power_supply_changed_work(struct work_struct *work)
 {
+	unsigned long flags;
 	struct power_supply *psy = container_of(work, struct power_supply,
 						changed_work);
 
 	dev_dbg(psy->dev, "%s\n", __func__);
 
-	class_for_each_device(power_supply_class, NULL, psy,
-			      __power_supply_changed_work);
+	spin_lock_irqsave(&psy->changed_lock, flags);
+	if (psy->changed) {
+		psy->changed = false;
+		spin_unlock_irqrestore(&psy->changed_lock, flags);
 
-	power_supply_update_leds(psy);
+		class_for_each_device(power_supply_class, NULL, psy,
+				      __power_supply_changed_work);
 
-	kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+		power_supply_update_leds(psy);
+
+		kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+		spin_lock_irqsave(&psy->changed_lock, flags);
+	}
+	if (!psy->changed)
+		wake_unlock(&psy->work_wake_lock);
+	spin_unlock_irqrestore(&psy->changed_lock, flags);
 }
 
 void power_supply_changed(struct power_supply *psy)
 {
+	unsigned long flags;
+
 	dev_dbg(psy->dev, "%s\n", __func__);
 
+	spin_lock_irqsave(&psy->changed_lock, flags);
+	psy->changed = true;
+	wake_lock(&psy->work_wake_lock);
+	spin_unlock_irqrestore(&psy->changed_lock, flags);
 	schedule_work(&psy->changed_work);
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -181,6 +198,9 @@
 	if (rc)
 		goto device_add_failed;
 
+	spin_lock_init(&psy->changed_lock);
+	wake_lock_init(&psy->work_wake_lock, WAKE_LOCK_SUSPEND, "power-supply");
+
 	rc = power_supply_create_triggers(psy);
 	if (rc)
 		goto create_triggers_failed;
@@ -190,6 +210,7 @@
 	goto success;
 
 create_triggers_failed:
+	wake_lock_destroy(&psy->work_wake_lock);
 	device_del(dev);
 kobject_set_name_failed:
 device_add_failed:
@@ -203,6 +224,7 @@
 {
 	cancel_work_sync(&psy->changed_work);
 	power_supply_remove_triggers(psy);
+	wake_lock_destroy(&psy->work_wake_lock);
 	device_unregister(psy->dev);
 }
 EXPORT_SYMBOL_GPL(power_supply_unregister);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index ce2aabf..27c3774 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -106,6 +106,24 @@
 	  clock several times per second, please enable this option
 	  only if you know that you really need it.
 
+config RTC_INTF_ALARM
+	bool "Android alarm driver"
+	depends on RTC_CLASS
+	default y
+	help
+	  Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+	  elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+	  Also provides an interface to set the wall time which must be used
+	  for elapsed realtime to work.
+
+config RTC_INTF_ALARM_DEV
+	bool "Android alarm device"
+	depends on RTC_INTF_ALARM
+	default y
+	help
+	  Exports the alarm interface to user-space.
+
+
 config RTC_DRV_TEST
 	tristate "Test driver/device"
 	help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0ffefe8..7d27958 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -9,6 +9,8 @@
 obj-$(CONFIG_RTC_CLASS)		+= rtc-core.o
 rtc-core-y			:= class.o interface.o
 
+obj-$(CONFIG_RTC_INTF_ALARM) += alarm.o
+obj-$(CONFIG_RTC_INTF_ALARM_DEV) += alarm-dev.o
 rtc-core-$(CONFIG_RTC_INTF_DEV)	+= rtc-dev.o
 rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
 rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
diff --git a/drivers/rtc/alarm-dev.c b/drivers/rtc/alarm-dev.c
new file mode 100644
index 0000000..686e6f7
--- /dev/null
+++ b/drivers/rtc/alarm-dev.c
@@ -0,0 +1,286 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach/time.h>
+#include <linux/android_alarm.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+	do { \
+		if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+			pr_info(args); \
+		} \
+	} while (0)
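+
+/*
+ * Example: pr_alarm(IO, "alarm %d set\n", alarm_type) only prints when the
+ * ANDROID_ALARM_PRINT_IO bit is set in debug_mask.  Assuming the usual sysfs
+ * path for the built-in parameter, it can be changed at runtime with:
+ *
+ *	echo 3 > /sys/module/alarm_dev/parameters/debug_mask
+ */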
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+	ANDROID_ALARM_RTC_WAKEUP_MASK | \
+	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD               _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD      _IOW('a', 3, time_t)
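+
+/*
+ * User-space sketch (not kernel code): the misc device registered below is
+ * conventionally /dev/alarm.  A client arms an absolute RTC wakeup alarm and
+ * blocks until any alarm fires; "when" is a hypothetical time in seconds and
+ * error handling is omitted.
+ *
+ *	int fd = open("/dev/alarm", O_RDWR);
+ *	struct timespec ts = { .tv_sec = when, .tv_nsec = 0 };
+ *
+ *	ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts);
+ *	int fired = ioctl(fd, ANDROID_ALARM_WAIT);	(returns mask of fired alarms)
+ */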
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct wake_lock alarm_wake_lock;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+static struct alarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int rv = 0;
+	unsigned long flags;
+	struct timespec new_alarm_time;
+	struct timespec new_rtc_time;
+	struct timespec tmp_time;
+	enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+	uint32_t alarm_type_mask = 1U << alarm_type;
+
+	if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+		return -EINVAL;
+
+	if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+		if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+			return -EPERM;
+		if (file->private_data == NULL &&
+		    cmd != ANDROID_ALARM_SET_RTC) {
+			spin_lock_irqsave(&alarm_slock, flags);
+			if (alarm_opened) {
+				spin_unlock_irqrestore(&alarm_slock, flags);
+				return -EBUSY;
+			}
+			alarm_opened = 1;
+			file->private_data = (void *)1;
+			spin_unlock_irqrestore(&alarm_slock, flags);
+		}
+	}
+
+	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+	case ANDROID_ALARM_CLEAR(0):
+		spin_lock_irqsave(&alarm_slock, flags);
+		pr_alarm(IO, "alarm %d clear\n", alarm_type);
+		alarm_try_to_cancel(&alarms[alarm_type]);
+		if (alarm_pending) {
+			alarm_pending &= ~alarm_type_mask;
+			if (!alarm_pending && !wait_pending)
+				wake_unlock(&alarm_wake_lock);
+		}
+		alarm_enabled &= ~alarm_type_mask;
+		spin_unlock_irqrestore(&alarm_slock, flags);
+		break;
+
+	case ANDROID_ALARM_SET_OLD:
+	case ANDROID_ALARM_SET_AND_WAIT_OLD:
+		if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+			rv = -EFAULT;
+			goto err1;
+		}
+		new_alarm_time.tv_nsec = 0;
+		goto from_old_alarm_set;
+
+	case ANDROID_ALARM_SET_AND_WAIT(0):
+	case ANDROID_ALARM_SET(0):
+		if (copy_from_user(&new_alarm_time, (void __user *)arg,
+		    sizeof(new_alarm_time))) {
+			rv = -EFAULT;
+			goto err1;
+		}
+from_old_alarm_set:
+		spin_lock_irqsave(&alarm_slock, flags);
+		pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
+			new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+		alarm_enabled |= alarm_type_mask;
+		alarm_start_range(&alarms[alarm_type],
+			timespec_to_ktime(new_alarm_time),
+			timespec_to_ktime(new_alarm_time));
+		spin_unlock_irqrestore(&alarm_slock, flags);
+		if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
+		    && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+			break;
+		/* fall through */
+	case ANDROID_ALARM_WAIT:
+		spin_lock_irqsave(&alarm_slock, flags);
+		pr_alarm(IO, "alarm wait\n");
+		if (!alarm_pending && wait_pending) {
+			wake_unlock(&alarm_wake_lock);
+			wait_pending = 0;
+		}
+		spin_unlock_irqrestore(&alarm_slock, flags);
+		rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+		if (rv)
+			goto err1;
+		spin_lock_irqsave(&alarm_slock, flags);
+		rv = alarm_pending;
+		wait_pending = 1;
+		alarm_pending = 0;
+		spin_unlock_irqrestore(&alarm_slock, flags);
+		break;
+	case ANDROID_ALARM_SET_RTC:
+		if (copy_from_user(&new_rtc_time, (void __user *)arg,
+		    sizeof(new_rtc_time))) {
+			rv = -EFAULT;
+			goto err1;
+		}
+		rv = alarm_set_rtc(new_rtc_time);
+		spin_lock_irqsave(&alarm_slock, flags);
+		alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+		wake_up(&alarm_wait_queue);
+		spin_unlock_irqrestore(&alarm_slock, flags);
+		if (rv < 0)
+			goto err1;
+		break;
+	case ANDROID_ALARM_GET_TIME(0):
+		switch (alarm_type) {
+		case ANDROID_ALARM_RTC_WAKEUP:
+		case ANDROID_ALARM_RTC:
+			getnstimeofday(&tmp_time);
+			break;
+		case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+		case ANDROID_ALARM_ELAPSED_REALTIME:
+			tmp_time =
+				ktime_to_timespec(alarm_get_elapsed_realtime());
+			break;
+		case ANDROID_ALARM_TYPE_COUNT:
+		case ANDROID_ALARM_SYSTEMTIME:
+			ktime_get_ts(&tmp_time);
+			break;
+		}
+		if (copy_to_user((void __user *)arg, &tmp_time,
+		    sizeof(tmp_time))) {
+			rv = -EFAULT;
+			goto err1;
+		}
+		break;
+
+	default:
+		rv = -EINVAL;
+		goto err1;
+	}
+err1:
+	return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+	file->private_data = NULL;
+	return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	if (file->private_data != 0) {
+		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+			uint32_t alarm_type_mask = 1U << i;
+			if (alarm_enabled & alarm_type_mask) {
+				pr_alarm(INFO, "alarm_release: clear alarm, "
+					"pending %d\n",
+					!!(alarm_pending & alarm_type_mask));
+				alarm_enabled &= ~alarm_type_mask;
+			}
+			spin_unlock_irqrestore(&alarm_slock, flags);
+			alarm_cancel(&alarms[i]);
+			spin_lock_irqsave(&alarm_slock, flags);
+		}
+		if (alarm_pending | wait_pending) {
+			if (alarm_pending)
+				pr_alarm(INFO, "alarm_release: clear "
+					"pending alarms %x\n", alarm_pending);
+			wake_unlock(&alarm_wake_lock);
+			wait_pending = 0;
+			alarm_pending = 0;
+		}
+		alarm_opened = 0;
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	return 0;
+}
+
+static void alarm_triggered(struct alarm *alarm)
+{
+	unsigned long flags;
+	uint32_t alarm_type_mask = 1U << alarm->type;
+
+	pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
+	spin_lock_irqsave(&alarm_slock, flags);
+	if (alarm_enabled & alarm_type_mask) {
+		wake_lock_timeout(&alarm_wake_lock, 5 * HZ);
+		alarm_enabled &= ~alarm_type_mask;
+		alarm_pending |= alarm_type_mask;
+		wake_up(&alarm_wait_queue);
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static const struct file_operations alarm_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = alarm_ioctl,
+	.open = alarm_open,
+	.release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "alarm",
+	.fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+	int err;
+	int i;
+
+	err = misc_register(&alarm_device);
+	if (err)
+		return err;
+
+	for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
+		alarm_init(&alarms[i], i, alarm_triggered);
+	wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm");
+
+	return 0;
+}
+
+static void  __exit alarm_dev_exit(void)
+{
+	misc_deregister(&alarm_device);
+	wake_lock_destroy(&alarm_wake_lock);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c
new file mode 100644
index 0000000..e0e98dd
--- /dev/null
+++ b/drivers/rtc/alarm.c
@@ -0,0 +1,590 @@
+/* drivers/rtc/alarm.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach/time.h>
+#include <linux/android_alarm.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/wakelock.h>
+
+#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_TSET (1U << 2)
+#define ANDROID_ALARM_PRINT_CALL (1U << 3)
+#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
+#define ANDROID_ALARM_PRINT_INT (1U << 5)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
+
+static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
+			ANDROID_ALARM_PRINT_INIT_STATUS;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+	do { \
+		if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+			pr_info(args); \
+		} \
+	} while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+	ANDROID_ALARM_RTC_WAKEUP_MASK | \
+	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD               _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD      _IOW('a', 3, time_t)
+
+struct alarm_queue {
+	struct rb_root alarms;
+	struct rb_node *first;
+	struct hrtimer timer;
+	ktime_t delta;
+	bool stopped;
+	ktime_t stopped_time;
+};
+
+static struct rtc_device *alarm_rtc_dev;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static struct wake_lock alarm_rtc_wake_lock;
+static struct platform_device *alarm_platform_dev;
+struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
+static bool suspended;
+
+static void update_timer_locked(struct alarm_queue *base, bool head_removed)
+{
+	struct alarm *alarm;
+	bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
+			base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+
+	if (base->stopped) {
+		pr_alarm(FLOW, "changed alarm while setting the wall time\n");
+		return;
+	}
+
+	if (is_wakeup && !suspended && head_removed)
+		wake_unlock(&alarm_rtc_wake_lock);
+
+	if (!base->first)
+		return;
+
+	alarm = container_of(base->first, struct alarm, node);
+
+	pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
+		alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+	if (is_wakeup && suspended) {
+		pr_alarm(FLOW, "changed alarm while suspended\n");
+		wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+		return;
+	}
+
+	hrtimer_try_to_cancel(&base->timer);
+	base->timer.node.expires = ktime_add(base->delta, alarm->expires);
+	base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
+	hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
+}
+
+static void alarm_enqueue_locked(struct alarm *alarm)
+{
+	struct alarm_queue *base = &alarms[alarm->type];
+	struct rb_node **link = &base->alarms.rb_node;
+	struct rb_node *parent = NULL;
+	struct alarm *entry;
+	int leftmost = 1;
+	bool was_first = false;
+
+	pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
+		alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+	if (base->first == &alarm->node) {
+		base->first = rb_next(&alarm->node);
+		was_first = true;
+	}
+	if (!RB_EMPTY_NODE(&alarm->node)) {
+		rb_erase(&alarm->node, &base->alarms);
+		RB_CLEAR_NODE(&alarm->node);
+	}
+
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct alarm, node);
+		/*
+		 * We don't care about collisions. Nodes with
+		 * the same expiry time stay together.
+		 */
+		if (alarm->expires.tv64 < entry->expires.tv64) {
+			link = &(*link)->rb_left;
+		} else {
+			link = &(*link)->rb_right;
+			leftmost = 0;
+		}
+	}
+	if (leftmost)
+		base->first = &alarm->node;
+	if (leftmost || was_first)
+		update_timer_locked(base, was_first);
+
+	rb_link_node(&alarm->node, parent, link);
+	rb_insert_color(&alarm->node, &base->alarms);
+}
+
+/**
+ * alarm_init - initialize an alarm
+ * @alarm:	the alarm to be initialized
+ * @type:	the alarm type to be used
+ * @function:	alarm callback function
+ */
+void alarm_init(struct alarm *alarm,
+	enum android_alarm_type type, void (*function)(struct alarm *))
+{
+	RB_CLEAR_NODE(&alarm->node);
+	alarm->type = type;
+	alarm->function = function;
+
+	pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
+}
+
+
+/**
+ * alarm_start_range - (re)start an alarm
+ * @alarm:	the alarm to be added
+ * @start:	earliest expiry time
+ * @end:	expiry time
+ */
+void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	alarm->softexpires = start;
+	alarm->expires = end;
+	alarm_enqueue_locked(alarm);
+	spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+/**
+ * alarm_try_to_cancel - try to deactivate an alarm
+ * @alarm:	alarm to stop
+ *
+ * Returns:
+ *  0 when the alarm was not active
+ *  1 when the alarm was active
+ * -1 when the alarm may currently be executing the callback function and
+ *    cannot be stopped (it may also be inactive)
+ */
+int alarm_try_to_cancel(struct alarm *alarm)
+{
+	struct alarm_queue *base = &alarms[alarm->type];
+	unsigned long flags;
+	bool first = false;
+	int ret = 0;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	if (!RB_EMPTY_NODE(&alarm->node)) {
+		pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
+			alarm->type, alarm->function,
+			ktime_to_ns(alarm->expires));
+		ret = 1;
+		if (base->first == &alarm->node) {
+			base->first = rb_next(&alarm->node);
+			first = true;
+		}
+		rb_erase(&alarm->node, &base->alarms);
+		RB_CLEAR_NODE(&alarm->node);
+		if (first)
+			update_timer_locked(base, true);
+	} else
+		pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
+			alarm->type, alarm->function);
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	if (!ret && hrtimer_callback_running(&base->timer))
+		ret = -1;
+	return ret;
+}
+
+/**
+ * alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm:	the alarm to be cancelled
+ *
+ * Returns:
+ *  0 when the alarm was not active
+ *  1 when the alarm was active
+ */
+int alarm_cancel(struct alarm *alarm)
+{
+	for (;;) {
+		int ret = alarm_try_to_cancel(alarm);
+		if (ret >= 0)
+			return ret;
+		cpu_relax();
+	}
+}
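+
+/*
+ * Minimal in-kernel usage sketch of this API (illustrative only; the
+ * callback and the "next"/"slack" ktime_t values below are hypothetical,
+ * not part of this patch):
+ *
+ *	static void my_alarm_cb(struct alarm *alarm)
+ *	{
+ *		... runs from the timer path once the alarm fires ...
+ *	}
+ *
+ *	static struct alarm my_alarm;
+ *
+ *	alarm_init(&my_alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ *		   my_alarm_cb);
+ *	alarm_start_range(&my_alarm, next, ktime_add(next, slack));
+ *
+ * alarm_cancel() above then tears it down and waits for a running callback,
+ * while alarm_try_to_cancel() is the non-blocking variant.
+ */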
+
+/**
+ * alarm_set_rtc - set the kernel and rtc walltime
+ * @new_time:	timespec value containing the new time
+ */
+int alarm_set_rtc(struct timespec new_time)
+{
+	int i;
+	int ret;
+	unsigned long flags;
+	struct rtc_time rtc_new_rtc_time;
+	struct timespec tmp_time;
+
+	rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
+
+	pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+		new_time.tv_sec, new_time.tv_nsec,
+		rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+		rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+		rtc_new_rtc_time.tm_mday,
+		rtc_new_rtc_time.tm_year + 1900);
+
+	mutex_lock(&alarm_setrtc_mutex);
+	spin_lock_irqsave(&alarm_slock, flags);
+	wake_lock(&alarm_rtc_wake_lock);
+	getnstimeofday(&tmp_time);
+	for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+		hrtimer_try_to_cancel(&alarms[i].timer);
+		alarms[i].stopped = true;
+		alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+	}
+	alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+		alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+		ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+			timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	ret = do_settimeofday(&new_time);
+	spin_lock_irqsave(&alarm_slock, flags);
+	for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+		alarms[i].stopped = false;
+		update_timer_locked(&alarms[i], false);
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	if (ret < 0) {
+		pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
+		goto err;
+	}
+	if (!alarm_rtc_dev) {
+		pr_alarm(ERROR,
+			"alarm_set_rtc: no RTC, time will be lost on reboot\n");
+		goto err;
+	}
+	ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+	if (ret < 0)
+		pr_alarm(ERROR, "alarm_set_rtc: "
+			"Failed to set RTC, time will be lost on reboot\n");
+err:
+	wake_unlock(&alarm_rtc_wake_lock);
+	mutex_unlock(&alarm_setrtc_mutex);
+	return ret;
+}
+
+/**
+ * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+ktime_t alarm_get_elapsed_realtime(void)
+{
+	ktime_t now;
+	unsigned long flags;
+	struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	now = base->stopped ? base->stopped_time : ktime_get_real();
+	now = ktime_sub(now, base->delta);
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	return now;
+}
+
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+	struct alarm_queue *base;
+	struct alarm *alarm;
+	unsigned long flags;
+	ktime_t now;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+
+	base = container_of(timer, struct alarm_queue, timer);
+	now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
+	now = ktime_sub(now, base->delta);
+
+	pr_alarm(INT, "alarm_timer_triggered type %d at %lld\n",
+		base - alarms, ktime_to_ns(now));
+
+	while (base->first) {
+		alarm = container_of(base->first, struct alarm, node);
+		if (alarm->softexpires.tv64 > now.tv64) {
+			pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
+				alarm->function, ktime_to_ns(alarm->expires),
+				ktime_to_ns(alarm->softexpires));
+			break;
+		}
+		base->first = rb_next(&alarm->node);
+		rb_erase(&alarm->node, &base->alarms);
+		RB_CLEAR_NODE(&alarm->node);
+		pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
+			alarm->type, alarm->function,
+			ktime_to_ns(alarm->expires),
+			ktime_to_ns(alarm->softexpires));
+		spin_unlock_irqrestore(&alarm_slock, flags);
+		alarm->function(alarm);
+		spin_lock_irqsave(&alarm_slock, flags);
+	}
+	if (!base->first)
+		pr_alarm(FLOW, "no more alarms of type %d\n", base - alarms);
+	update_timer_locked(base, true);
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	return HRTIMER_NORESTART;
+}
+
+static void alarm_triggered_func(void *p)
+{
+	struct rtc_device *rtc = alarm_rtc_dev;
+	if (!(rtc->irq_data & RTC_AF))
+		return;
+	pr_alarm(INT, "rtc alarm triggered\n");
+	wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ);
+}
+
+static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int                 err = 0;
+	unsigned long       flags;
+	struct rtc_wkalrm   rtc_alarm;
+	struct rtc_time     rtc_current_rtc_time;
+	unsigned long       rtc_current_time;
+	unsigned long       rtc_alarm_time;
+	struct timespec     rtc_delta;
+	struct timespec     wall_time;
+	struct alarm_queue *wakeup_queue = NULL;
+	struct alarm_queue *tmp_queue = NULL;
+
+	pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	suspended = true;
+	spin_unlock_irqrestore(&alarm_slock, flags);
+
+	hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
+	hrtimer_cancel(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
+
+	tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
+	if (tmp_queue->first)
+		wakeup_queue = tmp_queue;
+	tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+	if (tmp_queue->first && (!wakeup_queue ||
+				hrtimer_get_expires(&tmp_queue->timer).tv64 <
+				hrtimer_get_expires(&wakeup_queue->timer).tv64))
+		wakeup_queue = tmp_queue;
+	if (wakeup_queue) {
+		rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+		getnstimeofday(&wall_time);
+		rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+		set_normalized_timespec(&rtc_delta,
+					wall_time.tv_sec - rtc_current_time,
+					wall_time.tv_nsec);
+
+		rtc_alarm_time = timespec_sub(ktime_to_timespec(
+			hrtimer_get_expires(&wakeup_queue->timer)),
+			rtc_delta).tv_sec;
+
+		rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+		rtc_alarm.enabled = 1;
+		rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+		rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+		rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+		pr_alarm(SUSPEND,
+			"rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+			rtc_alarm_time, rtc_current_time,
+			rtc_delta.tv_sec, rtc_delta.tv_nsec);
+		if (rtc_current_time + 1 >= rtc_alarm_time) {
+			pr_alarm(SUSPEND, "alarm about to go off\n");
+			memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+			rtc_alarm.enabled = 0;
+			rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+			spin_lock_irqsave(&alarm_slock, flags);
+			suspended = false;
+			wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ);
+			update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
+									false);
+			update_timer_locked(&alarms[
+				ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
+			err = -EBUSY;
+			spin_unlock_irqrestore(&alarm_slock, flags);
+		}
+	}
+	return err;
+}
+
+static int alarm_resume(struct platform_device *pdev)
+{
+	struct rtc_wkalrm alarm;
+	unsigned long       flags;
+
+	pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
+
+	memset(&alarm, 0, sizeof(alarm));
+	alarm.enabled = 0;
+	rtc_set_alarm(alarm_rtc_dev, &alarm);
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	suspended = false;
+	update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
+	update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
+									false);
+	spin_unlock_irqrestore(&alarm_slock, flags);
+
+	return 0;
+}
+
+static struct rtc_task alarm_rtc_task = {
+	.func = alarm_triggered_func
+};
+
+static int rtc_alarm_add_device(struct device *dev,
+				struct class_interface *class_intf)
+{
+	int err;
+	struct rtc_device *rtc = to_rtc_device(dev);
+
+	mutex_lock(&alarm_setrtc_mutex);
+
+	if (alarm_rtc_dev) {
+		err = -EBUSY;
+		goto err1;
+	}
+
+	alarm_platform_dev =
+		platform_device_register_simple("alarm", -1, NULL, 0);
+	if (IS_ERR(alarm_platform_dev)) {
+		err = PTR_ERR(alarm_platform_dev);
+		goto err2;
+	}
+	err = rtc_irq_register(rtc, &alarm_rtc_task);
+	if (err)
+		goto err3;
+	alarm_rtc_dev = rtc;
+	pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
+	mutex_unlock(&alarm_setrtc_mutex);
+
+	return 0;
+
+err3:
+	platform_device_unregister(alarm_platform_dev);
+err2:
+err1:
+	mutex_unlock(&alarm_setrtc_mutex);
+	return err;
+}
+
+static void rtc_alarm_remove_device(struct device *dev,
+				    struct class_interface *class_intf)
+{
+	if (dev == &alarm_rtc_dev->dev) {
+		pr_alarm(INIT_STATUS, "lost rtc device for alarms");
+		rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+		platform_device_unregister(alarm_platform_dev);
+		alarm_rtc_dev = NULL;
+	}
+}
+
+static struct class_interface rtc_alarm_interface = {
+	.add_dev = &rtc_alarm_add_device,
+	.remove_dev = &rtc_alarm_remove_device,
+};
+
+static struct platform_driver alarm_driver = {
+	.suspend = alarm_suspend,
+	.resume = alarm_resume,
+	.driver = {
+		.name = "alarm"
+	}
+};
+
+static int __init alarm_late_init(void)
+{
+	unsigned long   flags;
+	struct timespec tmp_time, system_time;
+
+	/* this needs to run after the rtc is read at boot */
+	spin_lock_irqsave(&alarm_slock, flags);
+	/* We read the current rtc and system time so we can later calculate
+	 * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
+	 * (rtc - (boot_rtc - boot_systemtime))
+	 */
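+	/*
+	 * Worked example with made-up numbers: if the wall clock read at
+	 * boot is 1000000000s while the monotonic clock reads 5s, delta is
+	 * 999999995s; a later wall-clock value of 1000000100s then maps to
+	 * an elapsed realtime of 1000000100 - 999999995 = 105s, i.e. the 5s
+	 * of early boot plus the 100s that passed since this snapshot.
+	 */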
+	getnstimeofday(&tmp_time);
+	ktime_get_ts(&system_time);
+	alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+		alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+			timespec_to_ktime(timespec_sub(tmp_time, system_time));
+
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	return 0;
+}
+
+static int __init alarm_driver_init(void)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+		hrtimer_init(&alarms[i].timer,
+				CLOCK_REALTIME, HRTIMER_MODE_ABS);
+		alarms[i].timer.function = alarm_timer_triggered;
+	}
+	hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
+		     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
+	err = platform_driver_register(&alarm_driver);
+	if (err < 0)
+		goto err1;
+	wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc");
+	rtc_alarm_interface.class = rtc_class;
+	err = class_interface_register(&rtc_alarm_interface);
+	if (err < 0)
+		goto err2;
+
+	return 0;
+
+err2:
+	wake_lock_destroy(&alarm_rtc_wake_lock);
+	platform_driver_unregister(&alarm_driver);
+err1:
+	return err;
+}
+
+static void __exit alarm_exit(void)
+{
+	class_interface_unregister(&rtc_alarm_interface);
+	wake_lock_destroy(&alarm_rtc_wake_lock);
+	platform_driver_unregister(&alarm_driver);
+}
+
+late_initcall(alarm_late_init);
+module_init(alarm_driver_init);
+module_exit(alarm_exit);
+
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 196284d..11a4b5b 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -70,6 +70,8 @@
 
 source "drivers/staging/frontier/Kconfig"
 
+source "drivers/staging/android/Kconfig"
+
 source "drivers/staging/pohmelfs/Kconfig"
 
 source "drivers/staging/phison/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fa41b9c..ae62e92 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_RTS_PSTOR)		+= rts_pstor/
 obj-$(CONFIG_SPECTRA)		+= spectra/
 obj-$(CONFIG_TRANZPORT)		+= frontier/
+obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_POHMELFS)		+= pohmelfs/
 obj-$(CONFIG_IDE_PHISON)	+= phison/
 obj-$(CONFIG_LINE6_USB)		+= line6/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 0000000..2471949
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,95 @@
+menu "Android"
+
+config ANDROID
+	bool "Android Drivers"
+	default n
+	---help---
+	  Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+	bool "Android Binder IPC Driver"
+	default n
+
+config ANDROID_LOGGER
+	tristate "Android log driver"
+	default n
+
+config ANDROID_RAM_CONSOLE
+	bool "Android RAM buffer console"
+	default n
+
+config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+	bool "Enable verbose console messages on Android RAM console"
+	default y
+	depends on ANDROID_RAM_CONSOLE
+
+menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	bool "Android RAM Console Enable error correction"
+	default n
+	depends on ANDROID_RAM_CONSOLE
+	depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
+	select REED_SOLOMON
+	select REED_SOLOMON_ENC8
+	select REED_SOLOMON_DEC8
+
+if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+	int "Android RAM Console data size"
+	default 128
+	help
+	  Must be a power of 2.
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+	int "Android RAM Console ECC size"
+	default 16
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+	int "Android RAM Console Symbol size"
+	default 8
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+	hex "Android RAM Console Polynomial"
+	default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
+	default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
+	default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
+	default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
+	default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
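+# Note: with the default 8-bit symbol size, 0x11d is the standard primitive
+# polynomial x^8 + x^4 + x^3 + x^2 + 1 over GF(2^8), commonly used with the
+# Reed-Solomon library selected above.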
+
+endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_EARLY_INIT
+	bool "Start Android RAM console early"
+	default n
+	depends on ANDROID_RAM_CONSOLE
+
+config ANDROID_RAM_CONSOLE_EARLY_ADDR
+	hex "Android RAM console virtual address"
+	default 0
+	depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_RAM_CONSOLE_EARLY_SIZE
+	hex "Android RAM console buffer size"
+	default 0
+	depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_TIMED_OUTPUT
+	bool "Timed output class driver"
+	default y
+
+config ANDROID_TIMED_GPIO
+	tristate "Android timed gpio driver"
+	depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+	default n
+
+config ANDROID_LOW_MEMORY_KILLER
+	bool "Android Low Memory Killer"
+	default n
+	---help---
+	  Register processes to be killed when memory is low
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644
index 0000000..8e057e6
--- /dev/null
+++ b/drivers/staging/android/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o
+obj-$(CONFIG_ANDROID_RAM_CONSOLE)	+= ram_console.o
+obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
new file mode 100644
index 0000000..e13b4c4
--- /dev/null
+++ b/drivers/staging/android/binder.c
@@ -0,0 +1,3600 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "binder.h"
+
+static DEFINE_MUTEX(binder_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+	.owner = THIS_MODULE, \
+	.open = binder_##name##_open, \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+	.release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
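+/*
+ * For reference, BINDER_DEBUG_ENTRY(proc) above expands to a
+ * binder_proc_open() helper plus a binder_proc_fops file_operations
+ * instance that routes reads through single_open()/binder_proc_show();
+ * the same macro is reused for the other debugfs files created at init.
+ */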
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K                               0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M                               0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+	BINDER_DEBUG_USER_ERROR             = 1U << 0,
+	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
+	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
+	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
+	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
+	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
+	BINDER_DEBUG_READ_WRITE             = 1U << 6,
+	BINDER_DEBUG_USER_REFS              = 1U << 7,
+	BINDER_DEBUG_THREADS                = 1U << 8,
+	BINDER_DEBUG_TRANSACTION            = 1U << 9,
+	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
+	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
+	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
+	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
+	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
+	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static int binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+					 struct kernel_param *kp)
+{
+	int ret;
+	ret = param_set_int(val, kp);
+	if (binder_stop_on_user_error < 2)
+		wake_up(&binder_user_error_wait);
+	return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+	do { \
+		if (binder_debug_mask & mask) \
+			printk(KERN_INFO x); \
+	} while (0)
+
+#define binder_user_error(x...) \
+	do { \
+		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+			printk(KERN_INFO x); \
+		if (binder_stop_on_user_error) \
+			binder_stop_on_user_error = 2; \
+	} while (0)
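+/*
+ * These knobs are module parameters, so assuming the usual sysfs layout for
+ * built-in "binder" code they can likely be tuned at runtime along the lines
+ * of (paths are an assumption, not something this patch verifies):
+ *
+ *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
+ *	echo 1 > /sys/module/binder/parameters/stop_on_user_error
+ */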
+
+enum binder_stat_types {
+	BINDER_STAT_PROC,
+	BINDER_STAT_THREAD,
+	BINDER_STAT_NODE,
+	BINDER_STAT_REF,
+	BINDER_STAT_DEATH,
+	BINDER_STAT_TRANSACTION,
+	BINDER_STAT_TRANSACTION_COMPLETE,
+	BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+	int obj_created[BINDER_STAT_COUNT];
+	int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+	binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+	binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+	int debug_id;
+	int call_type;
+	int from_proc;
+	int from_thread;
+	int target_handle;
+	int to_proc;
+	int to_thread;
+	int to_node;
+	int data_size;
+	int offsets_size;
+};
+struct binder_transaction_log {
+	int next;
+	int full;
+	struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+	struct binder_transaction_log *log)
+{
+	struct binder_transaction_log_entry *e;
+	e = &log->entry[log->next];
+	memset(e, 0, sizeof(*e));
+	log->next++;
+	if (log->next == ARRAY_SIZE(log->entry)) {
+		log->next = 0;
+		log->full = 1;
+	}
+	return e;
+}
+
+struct binder_work {
+	struct list_head entry;
+	enum {
+		BINDER_WORK_TRANSACTION = 1,
+		BINDER_WORK_TRANSACTION_COMPLETE,
+		BINDER_WORK_NODE,
+		BINDER_WORK_DEAD_BINDER,
+		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+	} type;
+};
+
+struct binder_node {
+	int debug_id;
+	struct binder_work work;
+	union {
+		struct rb_node rb_node;
+		struct hlist_node dead_node;
+	};
+	struct binder_proc *proc;
+	struct hlist_head refs;
+	int internal_strong_refs;
+	int local_weak_refs;
+	int local_strong_refs;
+	void __user *ptr;
+	void __user *cookie;
+	unsigned has_strong_ref:1;
+	unsigned pending_strong_ref:1;
+	unsigned has_weak_ref:1;
+	unsigned pending_weak_ref:1;
+	unsigned has_async_transaction:1;
+	unsigned accept_fds:1;
+	unsigned min_priority:8;
+	struct list_head async_todo;
+};
+
+struct binder_ref_death {
+	struct binder_work work;
+	void __user *cookie;
+};
+
+struct binder_ref {
+	/* Lookups needed: */
+	/*   node + proc => ref (transaction) */
+	/*   desc + proc => ref (transaction, inc/dec ref) */
+	/*   node => refs + procs (proc exit) */
+	int debug_id;
+	struct rb_node rb_node_desc;
+	struct rb_node rb_node_node;
+	struct hlist_node node_entry;
+	struct binder_proc *proc;
+	struct binder_node *node;
+	uint32_t desc;
+	int strong;
+	int weak;
+	struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+	struct list_head entry; /* free and allocated entries by address */
+	struct rb_node rb_node; /* free entry by size or allocated entry */
+				/* by address */
+	unsigned free:1;
+	unsigned allow_user_free:1;
+	unsigned async_transaction:1;
+	unsigned debug_id:29;
+
+	struct binder_transaction *transaction;
+
+	struct binder_node *target_node;
+	size_t data_size;
+	size_t offsets_size;
+	uint8_t data[0];
+};
+
+enum binder_deferred_state {
+	BINDER_DEFERRED_PUT_FILES    = 0x01,
+	BINDER_DEFERRED_FLUSH        = 0x02,
+	BINDER_DEFERRED_RELEASE      = 0x04,
+};
+
+struct binder_proc {
+	struct hlist_node proc_node;
+	struct rb_root threads;
+	struct rb_root nodes;
+	struct rb_root refs_by_desc;
+	struct rb_root refs_by_node;
+	int pid;
+	struct vm_area_struct *vma;
+	struct task_struct *tsk;
+	struct files_struct *files;
+	struct hlist_node deferred_work_node;
+	int deferred_work;
+	void *buffer;
+	ptrdiff_t user_buffer_offset;
+
+	struct list_head buffers;
+	struct rb_root free_buffers;
+	struct rb_root allocated_buffers;
+	size_t free_async_space;
+
+	struct page **pages;
+	size_t buffer_size;
+	uint32_t buffer_free;
+	struct list_head todo;
+	wait_queue_head_t wait;
+	struct binder_stats stats;
+	struct list_head delivered_death;
+	int max_threads;
+	int requested_threads;
+	int requested_threads_started;
+	int ready_threads;
+	long default_priority;
+	struct dentry *debugfs_entry;
+};
+
+enum {
+	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
+	BINDER_LOOPER_STATE_ENTERED     = 0x02,
+	BINDER_LOOPER_STATE_EXITED      = 0x04,
+	BINDER_LOOPER_STATE_INVALID     = 0x08,
+	BINDER_LOOPER_STATE_WAITING     = 0x10,
+	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+	struct binder_proc *proc;
+	struct rb_node rb_node;
+	int pid;
+	int looper;
+	struct binder_transaction *transaction_stack;
+	struct list_head todo;
+	uint32_t return_error; /* Write failed, return error code in read buf */
+	uint32_t return_error2; /* Write failed, return error code in read */
+		/* buffer. Used when sending a reply to a dead process that */
+		/* we are also waiting on */
+	wait_queue_head_t wait;
+	struct binder_stats stats;
+};
+
+struct binder_transaction {
+	int debug_id;
+	struct binder_work work;
+	struct binder_thread *from;
+	struct binder_transaction *from_parent;
+	struct binder_proc *to_proc;
+	struct binder_thread *to_thread;
+	struct binder_transaction *to_parent;
+	unsigned need_reply:1;
+	/* unsigned is_dead:1; */	/* not used at the moment */
+
+	struct binder_buffer *buffer;
+	unsigned int	code;
+	unsigned int	flags;
+	long	priority;
+	long	saved_priority;
+	uid_t	sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+/*
+ * copied from get_unused_fd_flags
+ */
+int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+	struct files_struct *files = proc->files;
+	int fd, error;
+	struct fdtable *fdt;
+	unsigned long rlim_cur;
+	unsigned long irqs;
+
+	if (files == NULL)
+		return -ESRCH;
+
+	error = -EMFILE;
+	spin_lock(&files->file_lock);
+
+repeat:
+	fdt = files_fdtable(files);
+	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
+				files->next_fd);
+
+	/*
+	 * N.B. For clone tasks sharing a files structure, this test
+	 * will limit the total number of files that can be opened.
+	 */
+	rlim_cur = 0;
+	if (lock_task_sighand(proc->tsk, &irqs)) {
+		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+		unlock_task_sighand(proc->tsk, &irqs);
+	}
+	if (fd >= rlim_cur)
+		goto out;
+
+	/* Do we need to expand the fd array or fd set?  */
+	error = expand_files(files, fd);
+	if (error < 0)
+		goto out;
+
+	if (error) {
+		/*
+		 * If we needed to expand the fd array we
+		 * might have blocked - try again.
+		 */
+		error = -EMFILE;
+		goto repeat;
+	}
+
+	FD_SET(fd, fdt->open_fds);
+	if (flags & O_CLOEXEC)
+		FD_SET(fd, fdt->close_on_exec);
+	else
+		FD_CLR(fd, fdt->close_on_exec);
+	files->next_fd = fd + 1;
+#if 1
+	/* Sanity check */
+	if (fdt->fd[fd] != NULL) {
+		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+		fdt->fd[fd] = NULL;
+	}
+#endif
+	error = fd;
+
+out:
+	spin_unlock(&files->file_lock);
+	return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+	struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+	struct files_struct *files = proc->files;
+	struct fdtable *fdt;
+
+	if (files == NULL)
+		return;
+
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+	BUG_ON(fdt->fd[fd] != NULL);
+	rcu_assign_pointer(fdt->fd[fd], file);
+	spin_unlock(&files->file_lock);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+	struct fdtable *fdt = files_fdtable(files);
+	__FD_CLR(fd, fdt->open_fds);
+	if (fd < files->next_fd)
+		files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+	struct file *filp;
+	struct files_struct *files = proc->files;
+	struct fdtable *fdt;
+	int retval;
+
+	if (files == NULL)
+		return -ESRCH;
+
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+	if (fd >= fdt->max_fds)
+		goto out_unlock;
+	filp = fdt->fd[fd];
+	if (!filp)
+		goto out_unlock;
+	rcu_assign_pointer(fdt->fd[fd], NULL);
+	FD_CLR(fd, fdt->close_on_exec);
+	__put_unused_fd(files, fd);
+	spin_unlock(&files->file_lock);
+	retval = filp_close(filp, files);
+
+	/* can't restart close syscall because file table entry was cleared */
+	if (unlikely(retval == -ERESTARTSYS ||
+		     retval == -ERESTARTNOINTR ||
+		     retval == -ERESTARTNOHAND ||
+		     retval == -ERESTART_RESTARTBLOCK))
+		retval = -EINTR;
+
+	return retval;
+
+out_unlock:
+	spin_unlock(&files->file_lock);
+	return -EBADF;
+}
+
+static void binder_set_nice(long nice)
+{
+	long min_nice;
+	if (can_nice(current, nice)) {
+		set_user_nice(current, nice);
+		return;
+	}
+	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+		     "binder: %d: nice value %ld not allowed, use "
+		     "%ld instead\n", current->pid, nice, min_nice);
+	set_user_nice(current, min_nice);
+	if (min_nice < 20)
+		return;
+	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
+static size_t binder_buffer_size(struct binder_proc *proc,
+				 struct binder_buffer *buffer)
+{
+	if (list_is_last(&buffer->entry, &proc->buffers))
+		return proc->buffer + proc->buffer_size - (void *)buffer->data;
+	else
+		return (size_t)list_entry(buffer->entry.next,
+			struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_proc *proc,
+				      struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &proc->free_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	size_t new_buffer_size;
+
+	BUG_ON(!new_buffer->free);
+
+	new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "binder: %d: add free buffer, size %zd, "
+		     "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+
+		buffer_size = binder_buffer_size(proc, buffer);
+
+		if (new_buffer_size < buffer_size)
+			p = &parent->rb_left;
+		else
+			p = &parent->rb_right;
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+					   struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &proc->allocated_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+
+	BUG_ON(new_buffer->free);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (new_buffer < buffer)
+			p = &parent->rb_left;
+		else if (new_buffer > buffer)
+			p = &parent->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+						  void __user *user_ptr)
+{
+	struct rb_node *n = proc->allocated_buffers.rb_node;
+	struct binder_buffer *buffer;
+	struct binder_buffer *kern_ptr;
+
+	kern_ptr = user_ptr - proc->user_buffer_offset
+		- offsetof(struct binder_buffer, data);
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (kern_ptr < buffer)
+			n = n->rb_left;
+		else if (kern_ptr > buffer)
+			n = n->rb_right;
+		else
+			return buffer;
+	}
+	return NULL;
+}
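+
+/*
+ * Note on the pointer math above: proc->user_buffer_offset is the distance
+ * between the userspace mapping and the kernel buffer, so subtracting it
+ * converts the user data pointer back into a kernel address, and peeling
+ * off offsetof(struct binder_buffer, data) steps from the data[] payload
+ * back to the enclosing struct binder_buffer header used as the rb-tree key.
+ */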
+
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	void *page_addr;
+	unsigned long user_page_addr;
+	struct vm_struct tmp_area;
+	struct page **page;
+	struct mm_struct *mm;
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "binder: %d: %s pages %p-%p\n", proc->pid,
+		     allocate ? "allocate" : "free", start, end);
+
+	if (end <= start)
+		return 0;
+
+	if (vma)
+		mm = NULL;
+	else
+		mm = get_task_mm(proc->tsk);
+
+	if (mm) {
+		down_write(&mm->mmap_sem);
+		vma = proc->vma;
+	}
+
+	if (allocate == 0)
+		goto free_range;
+
+	if (vma == NULL) {
+		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+		       "map pages in userspace, no vma\n", proc->pid);
+		goto err_no_vma;
+	}
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		int ret;
+		struct page **page_array_ptr;
+		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+		BUG_ON(*page);
+		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (*page == NULL) {
+			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+			       "for page at %p\n", proc->pid, page_addr);
+			goto err_alloc_page_failed;
+		}
+		tmp_area.addr = page_addr;
+		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+		page_array_ptr = page;
+		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+		if (ret) {
+			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+			       "to map page at %p in kernel\n",
+			       proc->pid, page_addr);
+			goto err_map_kernel_failed;
+		}
+		user_page_addr =
+			(uintptr_t)page_addr + proc->user_buffer_offset;
+		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		if (ret) {
+			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+			       "to map page at %lx in userspace\n",
+			       proc->pid, user_page_addr);
+			goto err_vm_insert_page_failed;
+		}
+		/* vm_insert_page does not seem to increment the refcount */
+	}
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return 0;
+
+free_range:
+	for (page_addr = end - PAGE_SIZE; page_addr >= start;
+	     page_addr -= PAGE_SIZE) {
+		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+		if (vma)
+			zap_page_range(vma, (uintptr_t)page_addr +
+				proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+		__free_page(*page);
+		*page = NULL;
+err_alloc_page_failed:
+		;
+	}
+err_no_vma:
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return -ENOMEM;
+}
+
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+					      size_t data_size,
+					      size_t offsets_size, int is_async)
+{
+	struct rb_node *n = proc->free_buffers.rb_node;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	struct rb_node *best_fit = NULL;
+	void *has_page_addr;
+	void *end_page_addr;
+	size_t size;
+
+	if (proc->vma == NULL) {
+		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+		       proc->pid);
+		return NULL;
+	}
+
+	size = ALIGN(data_size, sizeof(void *)) +
+		ALIGN(offsets_size, sizeof(void *));
+
+	if (size < data_size || size < offsets_size) {
+		binder_user_error("binder: %d: got transaction with invalid "
+			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
+		return NULL;
+	}
+
+	if (is_async &&
+	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "binder: %d: binder_alloc_buf size %zd "
+			     "failed, no async space left\n", proc->pid, size);
+		return NULL;
+	}
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+		buffer_size = binder_buffer_size(proc, buffer);
+
+		if (size < buffer_size) {
+			best_fit = n;
+			n = n->rb_left;
+		} else if (size > buffer_size)
+			n = n->rb_right;
+		else {
+			best_fit = n;
+			break;
+		}
+	}
+	if (best_fit == NULL) {
+		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
+		       "no address space\n", proc->pid, size);
+		return NULL;
+	}
+	if (n == NULL) {
+		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+		buffer_size = binder_buffer_size(proc, buffer);
+	}
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "binder: %d: binder_alloc_buf size %zd got "
+		     "buffer %p size %zd\n", proc->pid, size, buffer, buffer_size);
+
+	has_page_addr =
+		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+	if (n == NULL) {
+		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+			buffer_size = size; /* no room for other buffers */
+		else
+			buffer_size = size + sizeof(struct binder_buffer);
+	}
+	end_page_addr =
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+	if (end_page_addr > has_page_addr)
+		end_page_addr = has_page_addr;
+	if (binder_update_page_range(proc, 1,
+	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+		return NULL;
+
+	rb_erase(best_fit, &proc->free_buffers);
+	buffer->free = 0;
+	binder_insert_allocated_buffer(proc, buffer);
+	if (buffer_size != size) {
+		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+		list_add(&new_buffer->entry, &buffer->entry);
+		new_buffer->free = 1;
+		binder_insert_free_buffer(proc, new_buffer);
+	}
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "binder: %d: binder_alloc_buf size %zd got "
+		     "%p\n", proc->pid, size, buffer);
+	buffer->data_size = data_size;
+	buffer->offsets_size = offsets_size;
+	buffer->async_transaction = is_async;
+	if (is_async) {
+		proc->free_async_space -= size + sizeof(struct binder_buffer);
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+			     "binder: %d: binder_alloc_buf size %zd "
+			     "async free %zd\n", proc->pid, size,
+			     proc->free_async_space);
+	}
+
+	return buffer;
+}
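+/*
+ * Size arithmetic in binder_alloc_buf(), as a worked example: with 4-byte
+ * pointers, data_size = 11 and offsets_size = 4 round up to
+ * ALIGN(11, 4) + ALIGN(4, 4) = 12 + 4 = 16 bytes; an async allocation of
+ * that transaction additionally charges sizeof(struct binder_buffer)
+ * against proc->free_async_space.
+ */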
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+	return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_proc *proc,
+				      struct binder_buffer *buffer)
+{
+	struct binder_buffer *prev, *next = NULL;
+	int free_page_end = 1;
+	int free_page_start = 1;
+
+	BUG_ON(proc->buffers.next == &buffer->entry);
+	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+	BUG_ON(!prev->free);
+	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+		free_page_start = 0;
+		if (buffer_end_page(prev) == buffer_end_page(buffer))
+			free_page_end = 0;
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "binder: %d: merge free, buffer %p "
+			     "share page with %p\n", proc->pid, buffer, prev);
+	}
+
+	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+		next = list_entry(buffer->entry.next,
+				  struct binder_buffer, entry);
+		if (buffer_start_page(next) == buffer_end_page(buffer)) {
+			free_page_end = 0;
+			if (buffer_start_page(next) ==
+			    buffer_start_page(buffer))
+				free_page_start = 0;
+			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				     "binder: %d: merge free, buffer"
+				     " %p share page with %p\n", proc->pid,
+				     buffer, next);
+		}
+	}
+	list_del(&buffer->entry);
+	if (free_page_start || free_page_end) {
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "binder: %d: merge free, buffer %p do "
+			     "not share page%s%s with %p or %p\n",
+			     proc->pid, buffer, free_page_start ? "" : " end",
+			     free_page_end ? "" : " start", prev, next);
+		binder_update_page_range(proc, 0, free_page_start ?
+			buffer_start_page(buffer) : buffer_end_page(buffer),
+			(free_page_end ? buffer_end_page(buffer) :
+			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+	}
+}
+
+static void binder_free_buf(struct binder_proc *proc,
+			    struct binder_buffer *buffer)
+{
+	size_t size, buffer_size;
+
+	buffer_size = binder_buffer_size(proc, buffer);
+
+	size = ALIGN(buffer->data_size, sizeof(void *)) +
+		ALIGN(buffer->offsets_size, sizeof(void *));
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "binder: %d: binder_free_buf %p size %zd "
+		     "buffer_size %zd\n", proc->pid, buffer, size, buffer_size);
+
+	BUG_ON(buffer->free);
+	BUG_ON(size > buffer_size);
+	BUG_ON(buffer->transaction != NULL);
+	BUG_ON((void *)buffer < proc->buffer);
+	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+	if (buffer->async_transaction) {
+		proc->free_async_space += size + sizeof(struct binder_buffer);
+
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+			     "binder: %d: binder_free_buf size %zd "
+			     "async free %zd\n", proc->pid, size,
+			     proc->free_async_space);
+	}
+
+	binder_update_page_range(proc, 0,
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
+		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+		NULL);
+	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+	buffer->free = 1;
+	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+		struct binder_buffer *next = list_entry(buffer->entry.next,
+						struct binder_buffer, entry);
+		if (next->free) {
+			rb_erase(&next->rb_node, &proc->free_buffers);
+			binder_delete_free_buffer(proc, next);
+		}
+	}
+	if (proc->buffers.next != &buffer->entry) {
+		struct binder_buffer *prev = list_entry(buffer->entry.prev,
+						struct binder_buffer, entry);
+		if (prev->free) {
+			binder_delete_free_buffer(proc, buffer);
+			rb_erase(&prev->rb_node, &proc->free_buffers);
+			buffer = prev;
+		}
+	}
+	binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+					   void __user *ptr)
+{
+	struct rb_node *n = proc->nodes.rb_node;
+	struct binder_node *node;
+
+	while (n) {
+		node = rb_entry(n, struct binder_node, rb_node);
+
+		if (ptr < node->ptr)
+			n = n->rb_left;
+		else if (ptr > node->ptr)
+			n = n->rb_right;
+		else
+			return node;
+	}
+	return NULL;
+}
+
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+					   void __user *ptr,
+					   void __user *cookie)
+{
+	struct rb_node **p = &proc->nodes.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_node *node;
+
+	while (*p) {
+		parent = *p;
+		node = rb_entry(parent, struct binder_node, rb_node);
+
+		if (ptr < node->ptr)
+			p = &(*p)->rb_left;
+		else if (ptr > node->ptr)
+			p = &(*p)->rb_right;
+		else
+			return NULL;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL)
+		return NULL;
+	binder_stats_created(BINDER_STAT_NODE);
+	rb_link_node(&node->rb_node, parent, p);
+	rb_insert_color(&node->rb_node, &proc->nodes);
+	node->debug_id = ++binder_last_id;
+	node->proc = proc;
+	node->ptr = ptr;
+	node->cookie = cookie;
+	node->work.type = BINDER_WORK_NODE;
+	INIT_LIST_HEAD(&node->work.entry);
+	INIT_LIST_HEAD(&node->async_todo);
+	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+		     "binder: %d:%d node %d u%p c%p created\n",
+		     proc->pid, current->pid, node->debug_id,
+		     node->ptr, node->cookie);
+	return node;
+}
+
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+			   struct list_head *target_list)
+{
+	if (strong) {
+		if (internal) {
+			if (target_list == NULL &&
+			    node->internal_strong_refs == 0 &&
+			    !(node == binder_context_mgr_node &&
+			    node->has_strong_ref)) {
+				printk(KERN_ERR "binder: invalid inc strong "
+					"node for %d\n", node->debug_id);
+				return -EINVAL;
+			}
+			node->internal_strong_refs++;
+		} else
+			node->local_strong_refs++;
+		if (!node->has_strong_ref && target_list) {
+			list_del_init(&node->work.entry);
+			list_add_tail(&node->work.entry, target_list);
+		}
+	} else {
+		if (!internal)
+			node->local_weak_refs++;
+		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+			if (target_list == NULL) {
+				printk(KERN_ERR "binder: invalid inc weak node "
+					"for %d\n", node->debug_id);
+				return -EINVAL;
+			}
+			list_add_tail(&node->work.entry, target_list);
+		}
+	}
+	return 0;
+}
+
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+	if (strong) {
+		if (internal)
+			node->internal_strong_refs--;
+		else
+			node->local_strong_refs--;
+		if (node->local_strong_refs || node->internal_strong_refs)
+			return 0;
+	} else {
+		if (!internal)
+			node->local_weak_refs--;
+		if (node->local_weak_refs || !hlist_empty(&node->refs))
+			return 0;
+	}
+	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+		if (list_empty(&node->work.entry)) {
+			list_add_tail(&node->work.entry, &node->proc->todo);
+			wake_up_interruptible(&node->proc->wait);
+		}
+	} else {
+		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+		    !node->local_weak_refs) {
+			list_del_init(&node->work.entry);
+			if (node->proc) {
+				rb_erase(&node->rb_node, &node->proc->nodes);
+				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+					     "binder: refless node %d deleted\n",
+					     node->debug_id);
+			} else {
+				hlist_del(&node->dead_node);
+				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+					     "binder: dead node %d deleted\n",
+					     node->debug_id);
+			}
+			kfree(node);
+			binder_stats_deleted(BINDER_STAT_NODE);
+		}
+	}
+
+	return 0;
+}
+
+
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+					 uint32_t desc)
+{
+	struct rb_node *n = proc->refs_by_desc.rb_node;
+	struct binder_ref *ref;
+
+	while (n) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+		if (desc < ref->desc)
+			n = n->rb_left;
+		else if (desc > ref->desc)
+			n = n->rb_right;
+		else
+			return ref;
+	}
+	return NULL;
+}
+
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+						  struct binder_node *node)
+{
+	struct rb_node *n;
+	struct rb_node **p = &proc->refs_by_node.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_ref *ref, *new_ref;
+
+	while (*p) {
+		parent = *p;
+		ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+		if (node < ref->node)
+			p = &(*p)->rb_left;
+		else if (node > ref->node)
+			p = &(*p)->rb_right;
+		else
+			return ref;
+	}
+	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (new_ref == NULL)
+		return NULL;
+	binder_stats_created(BINDER_STAT_REF);
+	new_ref->debug_id = ++binder_last_id;
+	new_ref->proc = proc;
+	new_ref->node = node;
+	rb_link_node(&new_ref->rb_node_node, parent, p);
+	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		if (ref->desc > new_ref->desc)
+			break;
+		new_ref->desc = ref->desc + 1;
+	}
+
+	p = &proc->refs_by_desc.rb_node;
+	while (*p) {
+		parent = *p;
+		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+		if (new_ref->desc < ref->desc)
+			p = &(*p)->rb_left;
+		else if (new_ref->desc > ref->desc)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_ref->rb_node_desc, parent, p);
+	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+	if (node) {
+		hlist_add_head(&new_ref->node_entry, &node->refs);
+
+		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+			     "binder: %d new ref %d desc %d for "
+			     "node %d\n", proc->pid, new_ref->debug_id,
+			     new_ref->desc, node->debug_id);
+	} else {
+		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+			     "binder: %d new ref %d desc %d for "
+			     "dead node\n", proc->pid, new_ref->debug_id,
+			      new_ref->desc);
+	}
+	return new_ref;
+}
+
+static void binder_delete_ref(struct binder_ref *ref)
+{
+	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+		     "binder: %d delete ref %d desc %d for "
+		     "node %d\n", ref->proc->pid, ref->debug_id,
+		     ref->desc, ref->node->debug_id);
+
+	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+	if (ref->strong)
+		binder_dec_node(ref->node, 1, 1);
+	hlist_del(&ref->node_entry);
+	binder_dec_node(ref->node, 0, 1);
+	if (ref->death) {
+		binder_debug(BINDER_DEBUG_DEAD_BINDER,
+			     "binder: %d delete ref %d desc %d "
+			     "has death notification\n", ref->proc->pid,
+			     ref->debug_id, ref->desc);
+		list_del(&ref->death->work.entry);
+		kfree(ref->death);
+		binder_stats_deleted(BINDER_STAT_DEATH);
+	}
+	kfree(ref);
+	binder_stats_deleted(BINDER_STAT_REF);
+}
+
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+			  struct list_head *target_list)
+{
+	int ret;
+	if (strong) {
+		if (ref->strong == 0) {
+			ret = binder_inc_node(ref->node, 1, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->strong++;
+	} else {
+		if (ref->weak == 0) {
+			ret = binder_inc_node(ref->node, 0, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->weak++;
+	}
+	return 0;
+}
+
+
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+	if (strong) {
+		if (ref->strong == 0) {
+			binder_user_error("binder: %d invalid dec strong, "
+					  "ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->debug_id,
+					  ref->desc, ref->strong, ref->weak);
+			return -EINVAL;
+		}
+		ref->strong--;
+		if (ref->strong == 0) {
+			int ret;
+			ret = binder_dec_node(ref->node, strong, 1);
+			if (ret)
+				return ret;
+		}
+	} else {
+		if (ref->weak == 0) {
+			binder_user_error("binder: %d invalid dec weak, "
+					  "ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->debug_id,
+					  ref->desc, ref->strong, ref->weak);
+			return -EINVAL;
+		}
+		ref->weak--;
+	}
+	if (ref->strong == 0 && ref->weak == 0)
+		binder_delete_ref(ref);
+	return 0;
+}
+
+static void binder_pop_transaction(struct binder_thread *target_thread,
+				   struct binder_transaction *t)
+{
+	if (target_thread) {
+		BUG_ON(target_thread->transaction_stack != t);
+		BUG_ON(target_thread->transaction_stack->from != target_thread);
+		target_thread->transaction_stack =
+			target_thread->transaction_stack->from_parent;
+		t->from = NULL;
+	}
+	t->need_reply = 0;
+	if (t->buffer)
+		t->buffer->transaction = NULL;
+	kfree(t);
+	binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
+static void binder_send_failed_reply(struct binder_transaction *t,
+				     uint32_t error_code)
+{
+	struct binder_thread *target_thread;
+	BUG_ON(t->flags & TF_ONE_WAY);
+	while (1) {
+		target_thread = t->from;
+		if (target_thread) {
+			if (target_thread->return_error != BR_OK &&
+			   target_thread->return_error2 == BR_OK) {
+				target_thread->return_error2 =
+					target_thread->return_error;
+				target_thread->return_error = BR_OK;
+			}
+			if (target_thread->return_error == BR_OK) {
+				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+					     "binder: send failed reply for "
+					     "transaction %d to %d:%d\n",
+					      t->debug_id, target_thread->proc->pid,
+					      target_thread->pid);
+
+				binder_pop_transaction(target_thread, t);
+				target_thread->return_error = error_code;
+				wake_up_interruptible(&target_thread->wait);
+			} else {
+				printk(KERN_ERR "binder: reply failed, target "
+					"thread, %d:%d, has error code %d "
+					"already\n", target_thread->proc->pid,
+					target_thread->pid,
+					target_thread->return_error);
+			}
+			return;
+		} else {
+			struct binder_transaction *next = t->from_parent;
+
+			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+				     "binder: send failed reply "
+				     "for transaction %d, target dead\n",
+				     t->debug_id);
+
+			binder_pop_transaction(target_thread, t);
+			if (next == NULL) {
+				binder_debug(BINDER_DEBUG_DEAD_BINDER,
+					     "binder: reply failed,"
+					     " no target thread at root\n");
+				return;
+			}
+			t = next;
+			binder_debug(BINDER_DEBUG_DEAD_BINDER,
+				     "binder: reply failed, no target "
+				     "thread -- retry %d\n", t->debug_id);
+		}
+	}
+}
+
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+					      struct binder_buffer *buffer,
+					      size_t *failed_at)
+{
+	size_t *offp, *off_end;
+	int debug_id = buffer->debug_id;
+
+	binder_debug(BINDER_DEBUG_TRANSACTION,
+		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
+		     proc->pid, buffer->debug_id,
+		     buffer->data_size, buffer->offsets_size, failed_at);
+
+	if (buffer->target_node)
+		binder_dec_node(buffer->target_node, 1, 0);
+
+	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+	if (failed_at)
+		off_end = failed_at;
+	else
+		off_end = (void *)offp + buffer->offsets_size;
+	for (; offp < off_end; offp++) {
+		struct flat_binder_object *fp;
+		if (*offp > buffer->data_size - sizeof(*fp) ||
+		    buffer->data_size < sizeof(*fp) ||
+		    !IS_ALIGNED(*offp, sizeof(void *))) {
+			printk(KERN_ERR "binder: transaction release %d bad "
+					"offset %zd, size %zd\n", debug_id,
+					*offp, buffer->data_size);
+			continue;
+		}
+		fp = (struct flat_binder_object *)(buffer->data + *offp);
+		switch (fp->type) {
+		case BINDER_TYPE_BINDER:
+		case BINDER_TYPE_WEAK_BINDER: {
+			struct binder_node *node = binder_get_node(proc, fp->binder);
+			if (node == NULL) {
+				printk(KERN_ERR "binder: transaction release %d"
+				       " bad node %p\n", debug_id, fp->binder);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        node %d u%p\n",
+				     node->debug_id, node->ptr);
+			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+		} break;
+		case BINDER_TYPE_HANDLE:
+		case BINDER_TYPE_WEAK_HANDLE: {
+			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+			if (ref == NULL) {
+				printk(KERN_ERR "binder: transaction release %d"
+				       " bad handle %ld\n", debug_id,
+				       fp->handle);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        ref %d desc %d (node %d)\n",
+				     ref->debug_id, ref->desc, ref->node->debug_id);
+			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+		} break;
+
+		case BINDER_TYPE_FD:
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        fd %ld\n", fp->handle);
+			if (failed_at)
+				task_close_fd(proc, fp->handle);
+			break;
+
+		default:
+			printk(KERN_ERR "binder: transaction release %d bad "
+			       "object type %lx\n", debug_id, fp->type);
+			break;
+		}
+	}
+}
+
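+/*
+ * Core of the binder protocol: build a transaction (or reply), copy the data
+ * and offsets buffers from user space into a buffer allocated in the target
+ * process, translate each flattened binder object into the target's handle
+ * space and queue the work on the chosen thread or process todo list.
+ */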
+static void binder_transaction(struct binder_proc *proc,
+			       struct binder_thread *thread,
+			       struct binder_transaction_data *tr, int reply)
+{
+	struct binder_transaction *t;
+	struct binder_work *tcomplete;
+	size_t *offp, *off_end;
+	struct binder_proc *target_proc;
+	struct binder_thread *target_thread = NULL;
+	struct binder_node *target_node = NULL;
+	struct list_head *target_list;
+	wait_queue_head_t *target_wait;
+	struct binder_transaction *in_reply_to = NULL;
+	struct binder_transaction_log_entry *e;
+	uint32_t return_error;
+
+	e = binder_transaction_log_add(&binder_transaction_log);
+	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+	e->from_proc = proc->pid;
+	e->from_thread = thread->pid;
+	e->target_handle = tr->target.handle;
+	e->data_size = tr->data_size;
+	e->offsets_size = tr->offsets_size;
+
+	if (reply) {
+		in_reply_to = thread->transaction_stack;
+		if (in_reply_to == NULL) {
+			binder_user_error("binder: %d:%d got reply transaction "
+					  "with no transaction stack\n",
+					  proc->pid, thread->pid);
+			return_error = BR_FAILED_REPLY;
+			goto err_empty_call_stack;
+		}
+		binder_set_nice(in_reply_to->saved_priority);
+		if (in_reply_to->to_thread != thread) {
+			binder_user_error("binder: %d:%d got reply transaction "
+				"with bad transaction stack,"
+				" transaction %d has target %d:%d\n",
+				proc->pid, thread->pid, in_reply_to->debug_id,
+				in_reply_to->to_proc ?
+				in_reply_to->to_proc->pid : 0,
+				in_reply_to->to_thread ?
+				in_reply_to->to_thread->pid : 0);
+			return_error = BR_FAILED_REPLY;
+			in_reply_to = NULL;
+			goto err_bad_call_stack;
+		}
+		thread->transaction_stack = in_reply_to->to_parent;
+		target_thread = in_reply_to->from;
+		if (target_thread == NULL) {
+			return_error = BR_DEAD_REPLY;
+			goto err_dead_binder;
+		}
+		if (target_thread->transaction_stack != in_reply_to) {
+			binder_user_error("binder: %d:%d got reply transaction "
+				"with bad target transaction stack %d, "
+				"expected %d\n",
+				proc->pid, thread->pid,
+				target_thread->transaction_stack ?
+				target_thread->transaction_stack->debug_id : 0,
+				in_reply_to->debug_id);
+			return_error = BR_FAILED_REPLY;
+			in_reply_to = NULL;
+			target_thread = NULL;
+			goto err_dead_binder;
+		}
+		target_proc = target_thread->proc;
+	} else {
+		if (tr->target.handle) {
+			struct binder_ref *ref;
+			ref = binder_get_ref(proc, tr->target.handle);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d got "
+					"transaction to invalid handle\n",
+					proc->pid, thread->pid);
+				return_error = BR_FAILED_REPLY;
+				goto err_invalid_target_handle;
+			}
+			target_node = ref->node;
+		} else {
+			target_node = binder_context_mgr_node;
+			if (target_node == NULL) {
+				return_error = BR_DEAD_REPLY;
+				goto err_no_context_mgr_node;
+			}
+		}
+		e->to_node = target_node->debug_id;
+		target_proc = target_node->proc;
+		if (target_proc == NULL) {
+			return_error = BR_DEAD_REPLY;
+			goto err_dead_binder;
+		}
+		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+			struct binder_transaction *tmp;
+			tmp = thread->transaction_stack;
+			if (tmp->to_thread != thread) {
+				binder_user_error("binder: %d:%d got new "
+					"transaction with bad transaction stack"
+					", transaction %d has target %d:%d\n",
+					proc->pid, thread->pid, tmp->debug_id,
+					tmp->to_proc ? tmp->to_proc->pid : 0,
+					tmp->to_thread ?
+					tmp->to_thread->pid : 0);
+				return_error = BR_FAILED_REPLY;
+				goto err_bad_call_stack;
+			}
+			while (tmp) {
+				if (tmp->from && tmp->from->proc == target_proc)
+					target_thread = tmp->from;
+				tmp = tmp->from_parent;
+			}
+		}
+	}
+	if (target_thread) {
+		e->to_thread = target_thread->pid;
+		target_list = &target_thread->todo;
+		target_wait = &target_thread->wait;
+	} else {
+		target_list = &target_proc->todo;
+		target_wait = &target_proc->wait;
+	}
+	e->to_proc = target_proc->pid;
+
+	/* TODO: reuse incoming transaction for reply */
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (t == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_alloc_t_failed;
+	}
+	binder_stats_created(BINDER_STAT_TRANSACTION);
+
+	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+	if (tcomplete == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_alloc_tcomplete_failed;
+	}
+	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+	t->debug_id = ++binder_last_id;
+	e->debug_id = t->debug_id;
+
+	if (reply)
+		binder_debug(BINDER_DEBUG_TRANSACTION,
+			     "binder: %d:%d BC_REPLY %d -> %d:%d, "
+			     "data %p-%p size %zd-%zd\n",
+			     proc->pid, thread->pid, t->debug_id,
+			     target_proc->pid, target_thread->pid,
+			     tr->data.ptr.buffer, tr->data.ptr.offsets,
+			     tr->data_size, tr->offsets_size);
+	else
+		binder_debug(BINDER_DEBUG_TRANSACTION,
+			     "binder: %d:%d BC_TRANSACTION %d -> "
+			     "%d - node %d, data %p-%p size %zd-%zd\n",
+			     proc->pid, thread->pid, t->debug_id,
+			     target_proc->pid, target_node->debug_id,
+			     tr->data.ptr.buffer, tr->data.ptr.offsets,
+			     tr->data_size, tr->offsets_size);
+
+	if (!reply && !(tr->flags & TF_ONE_WAY))
+		t->from = thread;
+	else
+		t->from = NULL;
+	t->sender_euid = proc->tsk->cred->euid;
+	t->to_proc = target_proc;
+	t->to_thread = target_thread;
+	t->code = tr->code;
+	t->flags = tr->flags;
+	t->priority = task_nice(current);
+	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+	if (t->buffer == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_binder_alloc_buf_failed;
+	}
+	t->buffer->allow_user_free = 0;
+	t->buffer->debug_id = t->debug_id;
+	t->buffer->transaction = t;
+	t->buffer->target_node = target_node;
+	if (target_node)
+		binder_inc_node(target_node, 1, 0, NULL);
+
+	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+		binder_user_error("binder: %d:%d got transaction with invalid "
+			"data ptr\n", proc->pid, thread->pid);
+		return_error = BR_FAILED_REPLY;
+		goto err_copy_data_failed;
+	}
+	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+		binder_user_error("binder: %d:%d got transaction with invalid "
+			"offsets ptr\n", proc->pid, thread->pid);
+		return_error = BR_FAILED_REPLY;
+		goto err_copy_data_failed;
+	}
+	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
+		binder_user_error("binder: %d:%d got transaction with "
+			"invalid offsets size, %zd\n",
+			proc->pid, thread->pid, tr->offsets_size);
+		return_error = BR_FAILED_REPLY;
+		goto err_bad_offset;
+	}
+	off_end = (void *)offp + tr->offsets_size;
+	for (; offp < off_end; offp++) {
+		struct flat_binder_object *fp;
+		if (*offp > t->buffer->data_size - sizeof(*fp) ||
+		    t->buffer->data_size < sizeof(*fp) ||
+		    !IS_ALIGNED(*offp, sizeof(void *))) {
+			binder_user_error("binder: %d:%d got transaction with "
+				"invalid offset, %zd\n",
+				proc->pid, thread->pid, *offp);
+			return_error = BR_FAILED_REPLY;
+			goto err_bad_offset;
+		}
+		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+		switch (fp->type) {
+		case BINDER_TYPE_BINDER:
+		case BINDER_TYPE_WEAK_BINDER: {
+			struct binder_ref *ref;
+			struct binder_node *node = binder_get_node(proc, fp->binder);
+			if (node == NULL) {
+				node = binder_new_node(proc, fp->binder, fp->cookie);
+				if (node == NULL) {
+					return_error = BR_FAILED_REPLY;
+					goto err_binder_new_node_failed;
+				}
+				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+			}
+			if (fp->cookie != node->cookie) {
+				binder_user_error("binder: %d:%d sending u%p "
+					"node %d, cookie mismatch %p != %p\n",
+					proc->pid, thread->pid,
+					fp->binder, node->debug_id,
+					fp->cookie, node->cookie);
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_for_node_failed;
+			}
+			ref = binder_get_ref_for_node(target_proc, node);
+			if (ref == NULL) {
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_for_node_failed;
+			}
+			if (fp->type == BINDER_TYPE_BINDER)
+				fp->type = BINDER_TYPE_HANDLE;
+			else
+				fp->type = BINDER_TYPE_WEAK_HANDLE;
+			fp->handle = ref->desc;
+			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+				       &thread->todo);
+
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        node %d u%p -> ref %d desc %d\n",
+				     node->debug_id, node->ptr, ref->debug_id,
+				     ref->desc);
+		} break;
+		case BINDER_TYPE_HANDLE:
+		case BINDER_TYPE_WEAK_HANDLE: {
+			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d got "
+					"transaction with invalid "
+					"handle, %ld\n", proc->pid,
+					thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_failed;
+			}
+			if (ref->node->proc == target_proc) {
+				if (fp->type == BINDER_TYPE_HANDLE)
+					fp->type = BINDER_TYPE_BINDER;
+				else
+					fp->type = BINDER_TYPE_WEAK_BINDER;
+				fp->binder = ref->node->ptr;
+				fp->cookie = ref->node->cookie;
+				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+				binder_debug(BINDER_DEBUG_TRANSACTION,
+					     "        ref %d desc %d -> node %d u%p\n",
+					     ref->debug_id, ref->desc, ref->node->debug_id,
+					     ref->node->ptr);
+			} else {
+				struct binder_ref *new_ref;
+				new_ref = binder_get_ref_for_node(target_proc, ref->node);
+				if (new_ref == NULL) {
+					return_error = BR_FAILED_REPLY;
+					goto err_binder_get_ref_for_node_failed;
+				}
+				fp->handle = new_ref->desc;
+				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+				binder_debug(BINDER_DEBUG_TRANSACTION,
+					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
+					     ref->debug_id, ref->desc, new_ref->debug_id,
+					     new_ref->desc, ref->node->debug_id);
+			}
+		} break;
+
+		case BINDER_TYPE_FD: {
+			int target_fd;
+			struct file *file;
+
+			if (reply) {
+				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+						proc->pid, thread->pid, fp->handle);
+					return_error = BR_FAILED_REPLY;
+					goto err_fd_not_allowed;
+				}
+			} else if (!target_node->accept_fds) {
+				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+					proc->pid, thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_fd_not_allowed;
+			}
+
+			file = fget(fp->handle);
+			if (file == NULL) {
+				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+					proc->pid, thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_fget_failed;
+			}
+			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+			if (target_fd < 0) {
+				fput(file);
+				return_error = BR_FAILED_REPLY;
+				goto err_get_unused_fd_failed;
+			}
+			task_fd_install(target_proc, target_fd, file);
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        fd %ld -> %d\n", fp->handle, target_fd);
+			/* TODO: fput? */
+			fp->handle = target_fd;
+		} break;
+
+		default:
+			binder_user_error("binder: %d:%d got transaction "
+				"with invalid object type, %lx\n",
+				proc->pid, thread->pid, fp->type);
+			return_error = BR_FAILED_REPLY;
+			goto err_bad_object_type;
+		}
+	}
+	if (reply) {
+		BUG_ON(t->buffer->async_transaction != 0);
+		binder_pop_transaction(target_thread, in_reply_to);
+	} else if (!(t->flags & TF_ONE_WAY)) {
+		BUG_ON(t->buffer->async_transaction != 0);
+		t->need_reply = 1;
+		t->from_parent = thread->transaction_stack;
+		thread->transaction_stack = t;
+	} else {
+		BUG_ON(target_node == NULL);
+		BUG_ON(t->buffer->async_transaction != 1);
+		if (target_node->has_async_transaction) {
+			target_list = &target_node->async_todo;
+			target_wait = NULL;
+		} else
+			target_node->has_async_transaction = 1;
+	}
+	t->work.type = BINDER_WORK_TRANSACTION;
+	list_add_tail(&t->work.entry, target_list);
+	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+	list_add_tail(&tcomplete->entry, &thread->todo);
+	if (target_wait)
+		wake_up_interruptible(target_wait);
+	return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	t->buffer->transaction = NULL;
+	binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+	kfree(tcomplete);
+	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+	kfree(t);
+	binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+		     "binder: %d:%d transaction failed %d, size %zd-%zd\n",
+		     proc->pid, thread->pid, return_error,
+		     tr->data_size, tr->offsets_size);
+
+	{
+		struct binder_transaction_log_entry *fe;
+		fe = binder_transaction_log_add(&binder_transaction_log_failed);
+		*fe = *e;
+	}
+
+	BUG_ON(thread->return_error != BR_OK);
+	if (in_reply_to) {
+		thread->return_error = BR_TRANSACTION_COMPLETE;
+		binder_send_failed_reply(in_reply_to, return_error);
+	} else
+		thread->return_error = return_error;
+}
+
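+/* Consume BC_* commands from the user write buffer, advancing *consumed. */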
+int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+			void __user *buffer, int size, signed long *consumed)
+{
+	uint32_t cmd;
+	void __user *ptr = buffer + *consumed;
+	void __user *end = buffer + size;
+
+	while (ptr < end && thread->return_error == BR_OK) {
+		if (get_user(cmd, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+			binder_stats.bc[_IOC_NR(cmd)]++;
+			proc->stats.bc[_IOC_NR(cmd)]++;
+			thread->stats.bc[_IOC_NR(cmd)]++;
+		}
+		switch (cmd) {
+		case BC_INCREFS:
+		case BC_ACQUIRE:
+		case BC_RELEASE:
+		case BC_DECREFS: {
+			uint32_t target;
+			struct binder_ref *ref;
+			const char *debug_string;
+
+			if (get_user(target, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (target == 0 && binder_context_mgr_node &&
+			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+				ref = binder_get_ref_for_node(proc,
+					       binder_context_mgr_node);
+				if (ref->desc != target) {
+					binder_user_error("binder: %d:"
+						"%d tried to acquire "
+						"reference to desc 0, "
+						"got %d instead\n",
+						proc->pid, thread->pid,
+						ref->desc);
+				}
+			} else
+				ref = binder_get_ref(proc, target);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d refcount "
+					"change on invalid ref %d\n",
+					proc->pid, thread->pid, target);
+				break;
+			}
+			switch (cmd) {
+			case BC_INCREFS:
+				debug_string = "IncRefs";
+				binder_inc_ref(ref, 0, NULL);
+				break;
+			case BC_ACQUIRE:
+				debug_string = "Acquire";
+				binder_inc_ref(ref, 1, NULL);
+				break;
+			case BC_RELEASE:
+				debug_string = "Release";
+				binder_dec_ref(ref, 1);
+				break;
+			case BC_DECREFS:
+			default:
+				debug_string = "DecRefs";
+				binder_dec_ref(ref, 0);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_USER_REFS,
+				     "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
+				     proc->pid, thread->pid, debug_string, ref->debug_id,
+				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+			break;
+		}
+		case BC_INCREFS_DONE:
+		case BC_ACQUIRE_DONE: {
+			void __user *node_ptr;
+			void *cookie;
+			struct binder_node *node;
+
+			if (get_user(node_ptr, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			if (get_user(cookie, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			node = binder_get_node(proc, node_ptr);
+			if (node == NULL) {
+				binder_user_error("binder: %d:%d "
+					"%s u%p no match\n",
+					proc->pid, thread->pid,
+					cmd == BC_INCREFS_DONE ?
+					"BC_INCREFS_DONE" :
+					"BC_ACQUIRE_DONE",
+					node_ptr);
+				break;
+			}
+			if (cookie != node->cookie) {
+				binder_user_error("binder: %d:%d %s u%p node %d"
+					" cookie mismatch %p != %p\n",
+					proc->pid, thread->pid,
+					cmd == BC_INCREFS_DONE ?
+					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+					node_ptr, node->debug_id,
+					cookie, node->cookie);
+				break;
+			}
+			if (cmd == BC_ACQUIRE_DONE) {
+				if (node->pending_strong_ref == 0) {
+					binder_user_error("binder: %d:%d "
+						"BC_ACQUIRE_DONE node %d has "
+						"no pending acquire request\n",
+						proc->pid, thread->pid,
+						node->debug_id);
+					break;
+				}
+				node->pending_strong_ref = 0;
+			} else {
+				if (node->pending_weak_ref == 0) {
+					binder_user_error("binder: %d:%d "
+						"BC_INCREFS_DONE node %d has "
+						"no pending increfs request\n",
+						proc->pid, thread->pid,
+						node->debug_id);
+					break;
+				}
+				node->pending_weak_ref = 0;
+			}
+			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+			binder_debug(BINDER_DEBUG_USER_REFS,
+				     "binder: %d:%d %s node %d ls %d lw %d\n",
+				     proc->pid, thread->pid,
+				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
+			break;
+		}
+		case BC_ATTEMPT_ACQUIRE:
+			printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+			return -EINVAL;
+		case BC_ACQUIRE_RESULT:
+			printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+			return -EINVAL;
+
+		case BC_FREE_BUFFER: {
+			void __user *data_ptr;
+			struct binder_buffer *buffer;
+
+			if (get_user(data_ptr, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+
+			buffer = binder_buffer_lookup(proc, data_ptr);
+			if (buffer == NULL) {
+				binder_user_error("binder: %d:%d "
+					"BC_FREE_BUFFER u%p no match\n",
+					proc->pid, thread->pid, data_ptr);
+				break;
+			}
+			if (!buffer->allow_user_free) {
+				binder_user_error("binder: %d:%d "
+					"BC_FREE_BUFFER u%p matched "
+					"unreturned buffer\n",
+					proc->pid, thread->pid, data_ptr);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_FREE_BUFFER,
+				     "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+				     proc->pid, thread->pid, data_ptr, buffer->debug_id,
+				     buffer->transaction ? "active" : "finished");
+
+			if (buffer->transaction) {
+				buffer->transaction->buffer = NULL;
+				buffer->transaction = NULL;
+			}
+			if (buffer->async_transaction && buffer->target_node) {
+				BUG_ON(!buffer->target_node->has_async_transaction);
+				if (list_empty(&buffer->target_node->async_todo))
+					buffer->target_node->has_async_transaction = 0;
+				else
+					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+			}
+			binder_transaction_buffer_release(proc, buffer, NULL);
+			binder_free_buf(proc, buffer);
+			break;
+		}
+
+		case BC_TRANSACTION:
+		case BC_REPLY: {
+			struct binder_transaction_data tr;
+
+			if (copy_from_user(&tr, ptr, sizeof(tr)))
+				return -EFAULT;
+			ptr += sizeof(tr);
+			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+			break;
+		}
+
+		case BC_REGISTER_LOOPER:
+			binder_debug(BINDER_DEBUG_THREADS,
+				     "binder: %d:%d BC_REGISTER_LOOPER\n",
+				     proc->pid, thread->pid);
+			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("binder: %d:%d ERROR:"
+					" BC_REGISTER_LOOPER called "
+					"after BC_ENTER_LOOPER\n",
+					proc->pid, thread->pid);
+			} else if (proc->requested_threads == 0) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("binder: %d:%d ERROR:"
+					" BC_REGISTER_LOOPER called "
+					"without request\n",
+					proc->pid, thread->pid);
+			} else {
+				proc->requested_threads--;
+				proc->requested_threads_started++;
+			}
+			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+			break;
+		case BC_ENTER_LOOPER:
+			binder_debug(BINDER_DEBUG_THREADS,
+				     "binder: %d:%d BC_ENTER_LOOPER\n",
+				     proc->pid, thread->pid);
+			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("binder: %d:%d ERROR:"
+					" BC_ENTER_LOOPER called after "
+					"BC_REGISTER_LOOPER\n",
+					proc->pid, thread->pid);
+			}
+			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+			break;
+		case BC_EXIT_LOOPER:
+			binder_debug(BINDER_DEBUG_THREADS,
+				     "binder: %d:%d BC_EXIT_LOOPER\n",
+				     proc->pid, thread->pid);
+			thread->looper |= BINDER_LOOPER_STATE_EXITED;
+			break;
+
+		case BC_REQUEST_DEATH_NOTIFICATION:
+		case BC_CLEAR_DEATH_NOTIFICATION: {
+			uint32_t target;
+			void __user *cookie;
+			struct binder_ref *ref;
+			struct binder_ref_death *death;
+
+			if (get_user(target, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (get_user(cookie, (void __user * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			ref = binder_get_ref(proc, target);
+			if (ref == NULL) {
+				binder_user_error("binder: %d:%d %s "
+					"invalid ref %d\n",
+					proc->pid, thread->pid,
+					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+					"BC_REQUEST_DEATH_NOTIFICATION" :
+					"BC_CLEAR_DEATH_NOTIFICATION",
+					target);
+				break;
+			}
+
+			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+				     "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+				     proc->pid, thread->pid,
+				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+				     "BC_REQUEST_DEATH_NOTIFICATION" :
+				     "BC_CLEAR_DEATH_NOTIFICATION",
+				     cookie, ref->debug_id, ref->desc,
+				     ref->strong, ref->weak, ref->node->debug_id);
+
+			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+				if (ref->death) {
+					binder_user_error("binder: %d:%d "
+						"BC_REQUEST_DEATH_NOTIFICATION "
+						"death notification already set\n",
+						proc->pid, thread->pid);
+					break;
+				}
+				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				if (death == NULL) {
+					thread->return_error = BR_ERROR;
+					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+						     "binder: %d:%d "
+						     "BC_REQUEST_DEATH_NOTIFICATION failed\n",
+						     proc->pid, thread->pid);
+					break;
+				}
+				binder_stats_created(BINDER_STAT_DEATH);
+				INIT_LIST_HEAD(&death->work.entry);
+				death->cookie = cookie;
+				ref->death = death;
+				if (ref->node->proc == NULL) {
+					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+						list_add_tail(&ref->death->work.entry, &thread->todo);
+					} else {
+						list_add_tail(&ref->death->work.entry, &proc->todo);
+						wake_up_interruptible(&proc->wait);
+					}
+				}
+			} else {
+				if (ref->death == NULL) {
+					binder_user_error("binder: %d:%d "
+						"BC_CLEAR_DEATH_NOTIFICATION "
+						"death notification not active\n",
+						proc->pid, thread->pid);
+					break;
+				}
+				death = ref->death;
+				if (death->cookie != cookie) {
+					binder_user_error("binder: %d:%d "
+						"BC_CLEAR_DEATH_NOTIFICATION "
+						"death notification cookie "
+						"mismatch %p != %p\n",
+						proc->pid, thread->pid,
+						death->cookie, cookie);
+					break;
+				}
+				ref->death = NULL;
+				if (list_empty(&death->work.entry)) {
+					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+						list_add_tail(&death->work.entry, &thread->todo);
+					} else {
+						list_add_tail(&death->work.entry, &proc->todo);
+						wake_up_interruptible(&proc->wait);
+					}
+				} else {
+					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+				}
+			}
+		} break;
+		case BC_DEAD_BINDER_DONE: {
+			struct binder_work *w;
+			void __user *cookie;
+			struct binder_ref_death *death = NULL;
+			if (get_user(cookie, (void __user * __user *)ptr))
+				return -EFAULT;
+
+			ptr += sizeof(void *);
+			list_for_each_entry(w, &proc->delivered_death, entry) {
+				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+				if (tmp_death->cookie == cookie) {
+					death = tmp_death;
+					break;
+				}
+			}
+			binder_debug(BINDER_DEBUG_DEAD_BINDER,
+				     "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+				     proc->pid, thread->pid, cookie, death);
+			if (death == NULL) {
+				binder_user_error("binder: %d:%d "
+					"BC_DEAD_BINDER_DONE %p not found\n",
+					proc->pid, thread->pid, cookie);
+				break;
+			}
+
+			list_del_init(&death->work.entry);
+			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+					list_add_tail(&death->work.entry, &thread->todo);
+				} else {
+					list_add_tail(&death->work.entry, &proc->todo);
+					wake_up_interruptible(&proc->wait);
+				}
+			}
+		} break;
+
+		default:
+			printk(KERN_ERR "binder: %d:%d unknown command %d\n",
+			       proc->pid, thread->pid, cmd);
+			return -EINVAL;
+		}
+		*consumed = ptr - buffer;
+	}
+	return 0;
+}
+
+void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
+		    uint32_t cmd)
+{
+	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+		binder_stats.br[_IOC_NR(cmd)]++;
+		proc->stats.br[_IOC_NR(cmd)]++;
+		thread->stats.br[_IOC_NR(cmd)]++;
+	}
+}
+
+static int binder_has_proc_work(struct binder_proc *proc,
+				struct binder_thread *thread)
+{
+	return !list_empty(&proc->todo) ||
+		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
+		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
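+/*
+ * Fill the user read buffer with BR_* work items, blocking (unless non_block
+ * is set) until thread or process work arrives.  When the thread pool runs
+ * dry, BR_SPAWN_LOOPER is written at the start of the buffer so user space
+ * starts another looper thread.
+ */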
+static int binder_thread_read(struct binder_proc *proc,
+			      struct binder_thread *thread,
+			      void  __user *buffer, int size,
+			      signed long *consumed, int non_block)
+{
+	void __user *ptr = buffer + *consumed;
+	void __user *end = buffer + size;
+
+	int ret = 0;
+	int wait_for_proc_work;
+
+	if (*consumed == 0) {
+		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+	}
+
+retry:
+	wait_for_proc_work = thread->transaction_stack == NULL &&
+				list_empty(&thread->todo);
+
+	if (thread->return_error != BR_OK && ptr < end) {
+		if (thread->return_error2 != BR_OK) {
+			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (ptr == end)
+				goto done;
+			thread->return_error2 = BR_OK;
+		}
+		if (put_user(thread->return_error, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		thread->return_error = BR_OK;
+		goto done;
+	}
+
+
+	thread->looper |= BINDER_LOOPER_STATE_WAITING;
+	if (wait_for_proc_work)
+		proc->ready_threads++;
+	mutex_unlock(&binder_lock);
+	if (wait_for_proc_work) {
+		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+					BINDER_LOOPER_STATE_ENTERED))) {
+			binder_user_error("binder: %d:%d ERROR: Thread waiting "
+				"for process work before calling BC_REGISTER_"
+				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
+				proc->pid, thread->pid, thread->looper);
+			wait_event_interruptible(binder_user_error_wait,
+						 binder_stop_on_user_error < 2);
+		}
+		binder_set_nice(proc->default_priority);
+		if (non_block) {
+			if (!binder_has_proc_work(proc, thread))
+				ret = -EAGAIN;
+		} else
+			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+	} else {
+		if (non_block) {
+			if (!binder_has_thread_work(thread))
+				ret = -EAGAIN;
+		} else
+			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+	}
+	mutex_lock(&binder_lock);
+	if (wait_for_proc_work)
+		proc->ready_threads--;
+	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+	if (ret)
+		return ret;
+
+	while (1) {
+		uint32_t cmd;
+		struct binder_transaction_data tr;
+		struct binder_work *w;
+		struct binder_transaction *t = NULL;
+
+		if (!list_empty(&thread->todo))
+			w = list_first_entry(&thread->todo, struct binder_work, entry);
+		else if (!list_empty(&proc->todo) && wait_for_proc_work)
+			w = list_first_entry(&proc->todo, struct binder_work, entry);
+		else {
+			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+				goto retry;
+			break;
+		}
+
+		if (end - ptr < sizeof(tr) + 4)
+			break;
+
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION: {
+			t = container_of(w, struct binder_transaction, work);
+		} break;
+		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			cmd = BR_TRANSACTION_COMPLETE;
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+
+			binder_stat_br(proc, thread, cmd);
+			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+				     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+				     proc->pid, thread->pid);
+
+			list_del(&w->entry);
+			kfree(w);
+			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+		} break;
+		case BINDER_WORK_NODE: {
+			struct binder_node *node = container_of(w, struct binder_node, work);
+			uint32_t cmd = BR_NOOP;
+			const char *cmd_name;
+			int strong = node->internal_strong_refs || node->local_strong_refs;
+			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+			if (weak && !node->has_weak_ref) {
+				cmd = BR_INCREFS;
+				cmd_name = "BR_INCREFS";
+				node->has_weak_ref = 1;
+				node->pending_weak_ref = 1;
+				node->local_weak_refs++;
+			} else if (strong && !node->has_strong_ref) {
+				cmd = BR_ACQUIRE;
+				cmd_name = "BR_ACQUIRE";
+				node->has_strong_ref = 1;
+				node->pending_strong_ref = 1;
+				node->local_strong_refs++;
+			} else if (!strong && node->has_strong_ref) {
+				cmd = BR_RELEASE;
+				cmd_name = "BR_RELEASE";
+				node->has_strong_ref = 0;
+			} else if (!weak && node->has_weak_ref) {
+				cmd = BR_DECREFS;
+				cmd_name = "BR_DECREFS";
+				node->has_weak_ref = 0;
+			}
+			if (cmd != BR_NOOP) {
+				if (put_user(cmd, (uint32_t __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(uint32_t);
+				if (put_user(node->ptr, (void * __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(void *);
+				if (put_user(node->cookie, (void * __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(void *);
+
+				binder_stat_br(proc, thread, cmd);
+				binder_debug(BINDER_DEBUG_USER_REFS,
+					     "binder: %d:%d %s %d u%p c%p\n",
+					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+			} else {
+				list_del_init(&w->entry);
+				if (!weak && !strong) {
+					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+						     "binder: %d:%d node %d u%p c%p deleted\n",
+						     proc->pid, thread->pid, node->debug_id,
+						     node->ptr, node->cookie);
+					rb_erase(&node->rb_node, &proc->nodes);
+					kfree(node);
+					binder_stats_deleted(BINDER_STAT_NODE);
+				} else {
+					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+						     "binder: %d:%d node %d u%p c%p state unchanged\n",
+						     proc->pid, thread->pid, node->debug_id, node->ptr,
+						     node->cookie);
+				}
+			}
+		} break;
+		case BINDER_WORK_DEAD_BINDER:
+		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+			struct binder_ref_death *death;
+			uint32_t cmd;
+
+			death = container_of(w, struct binder_ref_death, work);
+			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+			else
+				cmd = BR_DEAD_BINDER;
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (put_user(death->cookie, (void * __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(void *);
+			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+				     "binder: %d:%d %s %p\n",
+				      proc->pid, thread->pid,
+				      cmd == BR_DEAD_BINDER ?
+				      "BR_DEAD_BINDER" :
+				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+				      death->cookie);
+
+			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+				list_del(&w->entry);
+				kfree(death);
+				binder_stats_deleted(BINDER_STAT_DEATH);
+			} else
+				list_move(&w->entry, &proc->delivered_death);
+			if (cmd == BR_DEAD_BINDER)
+				goto done; /* DEAD_BINDER notifications can cause transactions */
+		} break;
+		}
+
+		if (!t)
+			continue;
+
+		BUG_ON(t->buffer == NULL);
+		if (t->buffer->target_node) {
+			struct binder_node *target_node = t->buffer->target_node;
+			tr.target.ptr = target_node->ptr;
+			tr.cookie =  target_node->cookie;
+			t->saved_priority = task_nice(current);
+			if (t->priority < target_node->min_priority &&
+			    !(t->flags & TF_ONE_WAY))
+				binder_set_nice(t->priority);
+			else if (!(t->flags & TF_ONE_WAY) ||
+				 t->saved_priority > target_node->min_priority)
+				binder_set_nice(target_node->min_priority);
+			cmd = BR_TRANSACTION;
+		} else {
+			tr.target.ptr = NULL;
+			tr.cookie = NULL;
+			cmd = BR_REPLY;
+		}
+		tr.code = t->code;
+		tr.flags = t->flags;
+		tr.sender_euid = t->sender_euid;
+
+		if (t->from) {
+			struct task_struct *sender = t->from->proc->tsk;
+			tr.sender_pid = task_tgid_nr_ns(sender,
+							current->nsproxy->pid_ns);
+		} else {
+			tr.sender_pid = 0;
+		}
+
+		tr.data_size = t->buffer->data_size;
+		tr.offsets_size = t->buffer->offsets_size;
+		tr.data.ptr.buffer = (void *)t->buffer->data +
+					proc->user_buffer_offset;
+		tr.data.ptr.offsets = tr.data.ptr.buffer +
+					ALIGN(t->buffer->data_size,
+					    sizeof(void *));
+
+		if (put_user(cmd, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		if (copy_to_user(ptr, &tr, sizeof(tr)))
+			return -EFAULT;
+		ptr += sizeof(tr);
+
+		binder_stat_br(proc, thread, cmd);
+		binder_debug(BINDER_DEBUG_TRANSACTION,
+			     "binder: %d:%d %s %d %d:%d, cmd %d "
+			     "size %zd-%zd ptr %p-%p\n",
+			     proc->pid, thread->pid,
+			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+			     "BR_REPLY",
+			     t->debug_id, t->from ? t->from->proc->pid : 0,
+			     t->from ? t->from->pid : 0, cmd,
+			     t->buffer->data_size, t->buffer->offsets_size,
+			     tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+		list_del(&t->work.entry);
+		t->buffer->allow_user_free = 1;
+		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+			t->to_parent = thread->transaction_stack;
+			t->to_thread = thread;
+			thread->transaction_stack = t;
+		} else {
+			t->buffer->transaction = NULL;
+			kfree(t);
+			binder_stats_deleted(BINDER_STAT_TRANSACTION);
+		}
+		break;
+	}
+
+done:
+
+	*consumed = ptr - buffer;
+	if (proc->requested_threads + proc->ready_threads == 0 &&
+	    proc->requested_threads_started < proc->max_threads &&
+	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+	     BINDER_LOOPER_STATE_ENTERED))
+	     /* the user-space code fails to spawn a new thread
+		if we leave this out */) {
+		proc->requested_threads++;
+		binder_debug(BINDER_DEBUG_THREADS,
+			     "binder: %d:%d BR_SPAWN_LOOPER\n",
+			     proc->pid, thread->pid);
+		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+			return -EFAULT;
+	}
+	return 0;
+}
+
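+/* Drop queued work items, failing any transactions that still expect a reply. */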
+static void binder_release_work(struct list_head *list)
+{
+	struct binder_work *w;
+	while (!list_empty(list)) {
+		w = list_first_entry(list, struct binder_work, entry);
+		list_del_init(&w->entry);
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION: {
+			struct binder_transaction *t;
+
+			t = container_of(w, struct binder_transaction, work);
+			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+				binder_send_failed_reply(t, BR_DEAD_REPLY);
+		} break;
+		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			kfree(w);
+			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+		} break;
+		default:
+			break;
+		}
+	}
+
+}
+
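+/* Look up the calling thread in proc->threads, creating it on first use. */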
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+	struct binder_thread *thread = NULL;
+	struct rb_node *parent = NULL;
+	struct rb_node **p = &proc->threads.rb_node;
+
+	while (*p) {
+		parent = *p;
+		thread = rb_entry(parent, struct binder_thread, rb_node);
+
+		if (current->pid < thread->pid)
+			p = &(*p)->rb_left;
+		else if (current->pid > thread->pid)
+			p = &(*p)->rb_right;
+		else
+			break;
+	}
+	if (*p == NULL) {
+		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		if (thread == NULL)
+			return NULL;
+		binder_stats_created(BINDER_STAT_THREAD);
+		thread->proc = proc;
+		thread->pid = current->pid;
+		init_waitqueue_head(&thread->wait);
+		INIT_LIST_HEAD(&thread->todo);
+		rb_link_node(&thread->rb_node, parent, p);
+		rb_insert_color(&thread->rb_node, &proc->threads);
+		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+		thread->return_error = BR_OK;
+		thread->return_error2 = BR_OK;
+	}
+	return thread;
+}
+
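+/*
+ * Remove a thread from its process: detach it from transactions still in
+ * flight, reply with BR_DEAD_REPLY to a sender still waiting on this thread
+ * and release any queued work.  Returns the number of transactions that
+ * remain active.
+ */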
+static int binder_free_thread(struct binder_proc *proc,
+			      struct binder_thread *thread)
+{
+	struct binder_transaction *t;
+	struct binder_transaction *send_reply = NULL;
+	int active_transactions = 0;
+
+	rb_erase(&thread->rb_node, &proc->threads);
+	t = thread->transaction_stack;
+	if (t && t->to_thread == thread)
+		send_reply = t;
+	while (t) {
+		active_transactions++;
+		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+			     "binder: release %d:%d transaction %d "
+			     "%s, still active\n", proc->pid, thread->pid,
+			     t->debug_id,
+			     (t->to_thread == thread) ? "in" : "out");
+
+		if (t->to_thread == thread) {
+			t->to_proc = NULL;
+			t->to_thread = NULL;
+			if (t->buffer) {
+				t->buffer->transaction = NULL;
+				t->buffer = NULL;
+			}
+			t = t->to_parent;
+		} else if (t->from == thread) {
+			t->from = NULL;
+			t = t->from_parent;
+		} else
+			BUG();
+	}
+	if (send_reply)
+		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+	binder_release_work(&thread->todo);
+	kfree(thread);
+	binder_stats_deleted(BINDER_STAT_THREAD);
+	return active_transactions;
+}
+
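+/* poll() support: report POLLIN when thread or process work is pending. */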
+static unsigned int binder_poll(struct file *filp,
+				struct poll_table_struct *wait)
+{
+	struct binder_proc *proc = filp->private_data;
+	struct binder_thread *thread = NULL;
+	int wait_for_proc_work;
+
+	mutex_lock(&binder_lock);
+	thread = binder_get_thread(proc);
+
+	wait_for_proc_work = thread->transaction_stack == NULL &&
+		list_empty(&thread->todo) && thread->return_error == BR_OK;
+	mutex_unlock(&binder_lock);
+
+	if (wait_for_proc_work) {
+		if (binder_has_proc_work(proc, thread))
+			return POLLIN;
+		poll_wait(filp, &proc->wait, wait);
+		if (binder_has_proc_work(proc, thread))
+			return POLLIN;
+	} else {
+		if (binder_has_thread_work(thread))
+			return POLLIN;
+		poll_wait(filp, &thread->wait, wait);
+		if (binder_has_thread_work(thread))
+			return POLLIN;
+	}
+	return 0;
+}
+
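+/*
+ * Top-level ioctl dispatcher.  BINDER_WRITE_READ carries the command and
+ * return streams; the remaining commands manage the thread pool, the context
+ * manager node and the protocol version query.
+ */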
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct binder_proc *proc = filp->private_data;
+	struct binder_thread *thread;
+	unsigned int size = _IOC_SIZE(cmd);
+	void __user *ubuf = (void __user *)arg;
+
+	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
+
+	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+	if (ret)
+		return ret;
+
+	mutex_lock(&binder_lock);
+	thread = binder_get_thread(proc);
+	if (thread == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	switch (cmd) {
+	case BINDER_WRITE_READ: {
+		struct binder_write_read bwr;
+		if (size != sizeof(struct binder_write_read)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		binder_debug(BINDER_DEBUG_READ_WRITE,
+			     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
+			     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
+			     bwr.read_size, bwr.read_buffer);
+
+		if (bwr.write_size > 0) {
+			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+			if (ret < 0) {
+				bwr.read_consumed = 0;
+				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+					ret = -EFAULT;
+				goto err;
+			}
+		}
+		if (bwr.read_size > 0) {
+			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+			if (!list_empty(&proc->todo))
+				wake_up_interruptible(&proc->wait);
+			if (ret < 0) {
+				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+					ret = -EFAULT;
+				goto err;
+			}
+		}
+		binder_debug(BINDER_DEBUG_READ_WRITE,
+			     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
+			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
+			     bwr.read_consumed, bwr.read_size);
+		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		break;
+	}
+	case BINDER_SET_MAX_THREADS:
+		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	case BINDER_SET_CONTEXT_MGR:
+		if (binder_context_mgr_node != NULL) {
+			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+			ret = -EBUSY;
+			goto err;
+		}
+		if (binder_context_mgr_uid != -1) {
+			if (binder_context_mgr_uid != current->cred->euid) {
+				printk(KERN_ERR "binder: BINDER_SET_"
+				       "CONTEXT_MGR bad uid %d != %d\n",
+				       current->cred->euid,
+				       binder_context_mgr_uid);
+				ret = -EPERM;
+				goto err;
+			}
+		} else
+			binder_context_mgr_uid = current->cred->euid;
+		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+		if (binder_context_mgr_node == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		binder_context_mgr_node->local_weak_refs++;
+		binder_context_mgr_node->local_strong_refs++;
+		binder_context_mgr_node->has_strong_ref = 1;
+		binder_context_mgr_node->has_weak_ref = 1;
+		break;
+	case BINDER_THREAD_EXIT:
+		binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
+			     proc->pid, thread->pid);
+		binder_free_thread(proc, thread);
+		thread = NULL;
+		break;
+	case BINDER_VERSION:
+		if (size != sizeof(struct binder_version)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = 0;
+err:
+	if (thread)
+		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+	mutex_unlock(&binder_lock);
+	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+	if (ret && ret != -ERESTARTSYS)
+		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+	return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+	struct binder_proc *proc = vma->vm_private_data;
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     proc->pid, vma->vm_start, vma->vm_end,
+		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+		     (unsigned long)pgprot_val(vma->vm_page_prot));
+	dump_stack();
+}
+
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+	struct binder_proc *proc = vma->vm_private_data;
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     proc->pid, vma->vm_start, vma->vm_end,
+		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+		     (unsigned long)pgprot_val(vma->vm_page_prot));
+	proc->vma = NULL;
+	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+	.open = binder_vma_open,
+	.close = binder_vma_close,
+};
+
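+/*
+ * Map the shared buffer area (capped at 4MB).  Backing pages are allocated
+ * lazily; only the first page is populated here and the whole range is
+ * inserted as a single free buffer.
+ */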
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+	struct vm_struct *area;
+	struct binder_proc *proc = filp->private_data;
+	const char *failure_string;
+	struct binder_buffer *buffer;
+
+	if ((vma->vm_end - vma->vm_start) > SZ_4M)
+		vma->vm_end = vma->vm_start + SZ_4M;
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     proc->pid, vma->vm_start, vma->vm_end,
+		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+		     (unsigned long)pgprot_val(vma->vm_page_prot));
+
+	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+		ret = -EPERM;
+		failure_string = "bad vm_flags";
+		goto err_bad_arg;
+	}
+	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+	if (proc->buffer) {
+		ret = -EBUSY;
+		failure_string = "already mapped";
+		goto err_already_mapped;
+	}
+
+	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+	if (area == NULL) {
+		ret = -ENOMEM;
+		failure_string = "get_vm_area";
+		goto err_get_vm_area_failed;
+	}
+	proc->buffer = area->addr;
+	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+	if (cache_is_vipt_aliasing()) {
+		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+			vma->vm_start += PAGE_SIZE;
+		}
+	}
+#endif
+	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+	if (proc->pages == NULL) {
+		ret = -ENOMEM;
+		failure_string = "alloc page array";
+		goto err_alloc_pages_failed;
+	}
+	proc->buffer_size = vma->vm_end - vma->vm_start;
+
+	vma->vm_ops = &binder_vm_ops;
+	vma->vm_private_data = proc;
+
+	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+		ret = -ENOMEM;
+		failure_string = "alloc small buf";
+		goto err_alloc_small_buf_failed;
+	}
+	buffer = proc->buffer;
+	INIT_LIST_HEAD(&proc->buffers);
+	list_add(&buffer->entry, &proc->buffers);
+	buffer->free = 1;
+	binder_insert_free_buffer(proc, buffer);
+	proc->free_async_space = proc->buffer_size / 2;
+	barrier();
+	proc->files = get_files_struct(current);
+	proc->vma = vma;
+
+	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
+		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+	return 0;
+
+err_alloc_small_buf_failed:
+	kfree(proc->pages);
+	proc->pages = NULL;
+err_alloc_pages_failed:
+	vfree(proc->buffer);
+	proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+err_bad_arg:
+	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
+	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+	return ret;
+}
+
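+/* Allocate a binder_proc for the opening process and expose it in debugfs. */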
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+	struct binder_proc *proc;
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+		     current->group_leader->pid, current->pid);
+
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	if (proc == NULL)
+		return -ENOMEM;
+	get_task_struct(current);
+	proc->tsk = current;
+	INIT_LIST_HEAD(&proc->todo);
+	init_waitqueue_head(&proc->wait);
+	proc->default_priority = task_nice(current);
+	mutex_lock(&binder_lock);
+	binder_stats_created(BINDER_STAT_PROC);
+	hlist_add_head(&proc->proc_node, &binder_procs);
+	proc->pid = current->group_leader->pid;
+	INIT_LIST_HEAD(&proc->delivered_death);
+	filp->private_data = proc;
+	mutex_unlock(&binder_lock);
+
+	if (binder_debugfs_dir_entry_proc) {
+		char strbuf[11];
+		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+	}
+
+	return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+	struct binder_proc *proc = filp->private_data;
+
+	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+	return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	int wake_count = 0;
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+			wake_up_interruptible(&thread->wait);
+			wake_count++;
+		}
+	}
+	wake_up_interruptible_all(&proc->wait);
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "binder_flush: %d woke %d threads\n", proc->pid,
+		     wake_count);
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+	struct binder_proc *proc = filp->private_data;
+	debugfs_remove(proc->debugfs_entry);
+	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+	return 0;
+}
+
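+/*
+ * Final teardown of a binder_proc once its file and mapping are gone: free
+ * threads, nodes (still-referenced nodes are moved to binder_dead_nodes and
+ * death notifications are queued), refs, buffers and backing pages.
+ */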
+static void binder_deferred_release(struct binder_proc *proc)
+{
+	struct hlist_node *pos;
+	struct binder_transaction *t;
+	struct rb_node *n;
+	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+	BUG_ON(proc->vma);
+	BUG_ON(proc->files);
+
+	hlist_del(&proc->proc_node);
+	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+		binder_debug(BINDER_DEBUG_DEAD_BINDER,
+			     "binder_release: %d context_mgr_node gone\n",
+			     proc->pid);
+		binder_context_mgr_node = NULL;
+	}
+
+	threads = 0;
+	active_transactions = 0;
+	while ((n = rb_first(&proc->threads))) {
+		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+		threads++;
+		active_transactions += binder_free_thread(proc, thread);
+	}
+	nodes = 0;
+	incoming_refs = 0;
+	while ((n = rb_first(&proc->nodes))) {
+		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+		nodes++;
+		rb_erase(&node->rb_node, &proc->nodes);
+		list_del_init(&node->work.entry);
+		if (hlist_empty(&node->refs)) {
+			kfree(node);
+			binder_stats_deleted(BINDER_STAT_NODE);
+		} else {
+			struct binder_ref *ref;
+			int death = 0;
+
+			node->proc = NULL;
+			node->local_strong_refs = 0;
+			node->local_weak_refs = 0;
+			hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+				incoming_refs++;
+				if (ref->death) {
+					death++;
+					if (list_empty(&ref->death->work.entry)) {
+						ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+						list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+						wake_up_interruptible(&ref->proc->wait);
+					} else
+						BUG();
+				}
+			}
+			binder_debug(BINDER_DEBUG_DEAD_BINDER,
+				     "binder: node %d now dead, "
+				     "refs %d, death %d\n", node->debug_id,
+				     incoming_refs, death);
+		}
+	}
+	outgoing_refs = 0;
+	while ((n = rb_first(&proc->refs_by_desc))) {
+		struct binder_ref *ref = rb_entry(n, struct binder_ref,
+						  rb_node_desc);
+		outgoing_refs++;
+		binder_delete_ref(ref);
+	}
+	binder_release_work(&proc->todo);
+	buffers = 0;
+
+	while ((n = rb_first(&proc->allocated_buffers))) {
+		struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
+							rb_node);
+		t = buffer->transaction;
+		if (t) {
+			t->buffer = NULL;
+			buffer->transaction = NULL;
+			printk(KERN_ERR "binder: release proc %d, "
+			       "transaction %d, not freed\n",
+			       proc->pid, t->debug_id);
+			/*BUG();*/
+		}
+		binder_free_buf(proc, buffer);
+		buffers++;
+	}
+
+	binder_stats_deleted(BINDER_STAT_PROC);
+
+	page_count = 0;
+	if (proc->pages) {
+		int i;
+		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+			if (proc->pages[i]) {
+				void *page_addr = proc->buffer + i * PAGE_SIZE;
+				binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+					     "binder_release: %d: "
+					     "page %d at %p not freed\n",
+					     proc->pid, i,
+					     page_addr);
+				unmap_kernel_range((unsigned long)page_addr,
+					PAGE_SIZE);
+				__free_page(proc->pages[i]);
+				page_count++;
+			}
+		}
+		kfree(proc->pages);
+		vfree(proc->buffer);
+	}
+
+	put_task_struct(proc->tsk);
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "binder_release: %d threads %d, nodes %d (ref %d), "
+		     "refs %d, active transactions %d, buffers %d, "
+		     "pages %d\n",
+		     proc->pid, threads, nodes, incoming_refs, outgoing_refs,
+		     active_transactions, buffers, page_count);
+
+	kfree(proc);
+}
+
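+/* Workqueue handler: drain binder_deferred_list one binder_proc at a time. */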
+static void binder_deferred_func(struct work_struct *work)
+{
+	struct binder_proc *proc;
+	struct files_struct *files;
+
+	int defer;
+	do {
+		mutex_lock(&binder_lock);
+		mutex_lock(&binder_deferred_lock);
+		if (!hlist_empty(&binder_deferred_list)) {
+			proc = hlist_entry(binder_deferred_list.first,
+					struct binder_proc, deferred_work_node);
+			hlist_del_init(&proc->deferred_work_node);
+			defer = proc->deferred_work;
+			proc->deferred_work = 0;
+		} else {
+			proc = NULL;
+			defer = 0;
+		}
+		mutex_unlock(&binder_deferred_lock);
+
+		files = NULL;
+		if (defer & BINDER_DEFERRED_PUT_FILES) {
+			files = proc->files;
+			if (files)
+				proc->files = NULL;
+		}
+
+		if (defer & BINDER_DEFERRED_FLUSH)
+			binder_deferred_flush(proc);
+
+		if (defer & BINDER_DEFERRED_RELEASE)
+			binder_deferred_release(proc); /* frees proc */
+
+		mutex_unlock(&binder_lock);
+		if (files)
+			put_files_struct(files);
+	} while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
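+/* Record deferred work for @proc and kick the shared deferred workqueue. */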
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+	mutex_lock(&binder_deferred_lock);
+	proc->deferred_work |= defer;
+	if (hlist_unhashed(&proc->deferred_work_node)) {
+		hlist_add_head(&proc->deferred_work_node,
+				&binder_deferred_list);
+		queue_work(binder_deferred_workqueue, &binder_deferred_work);
+	}
+	mutex_unlock(&binder_deferred_lock);
+}
+
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+				     struct binder_transaction *t)
+{
+	seq_printf(m,
+		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+		   prefix, t->debug_id, t,
+		   t->from ? t->from->proc->pid : 0,
+		   t->from ? t->from->pid : 0,
+		   t->to_proc ? t->to_proc->pid : 0,
+		   t->to_thread ? t->to_thread->pid : 0,
+		   t->code, t->flags, t->priority, t->need_reply);
+	if (t->buffer == NULL) {
+		seq_puts(m, " buffer free\n");
+		return;
+	}
+	if (t->buffer->target_node)
+		seq_printf(m, " node %d",
+			   t->buffer->target_node->debug_id);
+	seq_printf(m, " size %zd:%zd data %p\n",
+		   t->buffer->data_size, t->buffer->offsets_size,
+		   t->buffer->data);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+				struct binder_buffer *buffer)
+{
+	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+		   prefix, buffer->debug_id, buffer->data,
+		   buffer->data_size, buffer->offsets_size,
+		   buffer->transaction ? "active" : "delivered");
+}
+
+static void print_binder_work(struct seq_file *m, const char *prefix,
+			      const char *transaction_prefix,
+			      struct binder_work *w)
+{
+	struct binder_node *node;
+	struct binder_transaction *t;
+
+	switch (w->type) {
+	case BINDER_WORK_TRANSACTION:
+		t = container_of(w, struct binder_transaction, work);
+		print_binder_transaction(m, transaction_prefix, t);
+		break;
+	case BINDER_WORK_TRANSACTION_COMPLETE:
+		seq_printf(m, "%stransaction complete\n", prefix);
+		break;
+	case BINDER_WORK_NODE:
+		node = container_of(w, struct binder_node, work);
+		seq_printf(m, "%snode work %d: u%p c%p\n",
+			   prefix, node->debug_id, node->ptr, node->cookie);
+		break;
+	case BINDER_WORK_DEAD_BINDER:
+		seq_printf(m, "%shas dead binder\n", prefix);
+		break;
+	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+		seq_printf(m, "%shas cleared dead binder\n", prefix);
+		break;
+	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+		seq_printf(m, "%shas cleared death notification\n", prefix);
+		break;
+	default:
+		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+		break;
+	}
+}
+
+static void print_binder_thread(struct seq_file *m,
+				struct binder_thread *thread,
+				int print_always)
+{
+	struct binder_transaction *t;
+	struct binder_work *w;
+	size_t start_pos = m->count;
+	size_t header_pos;
+
+	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
+	header_pos = m->count;
+	t = thread->transaction_stack;
+	while (t) {
+		if (t->from == thread) {
+			print_binder_transaction(m,
+						 "    outgoing transaction", t);
+			t = t->from_parent;
+		} else if (t->to_thread == thread) {
+			print_binder_transaction(m,
+						 "    incoming transaction", t);
+			t = t->to_parent;
+		} else {
+			print_binder_transaction(m, "    bad transaction", t);
+			t = NULL;
+		}
+	}
+	list_for_each_entry(w, &thread->todo, entry) {
+		print_binder_work(m, "    ", "    pending transaction", w);
+	}
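+	/* if this thread printed nothing beyond its header, drop the header */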
+	if (!print_always && m->count == header_pos)
+		m->count = start_pos;
+}
+
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+	struct binder_ref *ref;
+	struct hlist_node *pos;
+	struct binder_work *w;
+	int count;
+
+	count = 0;
+	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+		count++;
+
+	seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+		   node->debug_id, node->ptr, node->cookie,
+		   node->has_strong_ref, node->has_weak_ref,
+		   node->local_strong_refs, node->local_weak_refs,
+		   node->internal_strong_refs, count);
+	if (count) {
+		seq_puts(m, " proc");
+		hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+			seq_printf(m, " %d", ref->proc->pid);
+	}
+	seq_puts(m, "\n");
+	list_for_each_entry(w, &node->async_todo, entry)
+		print_binder_work(m, "    ",
+				  "    pending async transaction", w);
+}
+
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
+		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+static void print_binder_proc(struct seq_file *m,
+			      struct binder_proc *proc, int print_all)
+{
+	struct binder_work *w;
+	struct rb_node *n;
+	size_t start_pos = m->count;
+	size_t header_pos;
+
+	seq_printf(m, "proc %d\n", proc->pid);
+	header_pos = m->count;
+
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+		print_binder_thread(m, rb_entry(n, struct binder_thread,
+						rb_node), print_all);
+	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+		struct binder_node *node = rb_entry(n, struct binder_node,
+						    rb_node);
+		if (print_all || node->has_async_transaction)
+			print_binder_node(m, node);
+	}
+	if (print_all) {
+		for (n = rb_first(&proc->refs_by_desc);
+		     n != NULL;
+		     n = rb_next(n))
+			print_binder_ref(m, rb_entry(n, struct binder_ref,
+						     rb_node_desc));
+	}
+	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+		print_binder_buffer(m, "  buffer",
+				    rb_entry(n, struct binder_buffer, rb_node));
+	list_for_each_entry(w, &proc->todo, entry)
+		print_binder_work(m, "  ", "  pending transaction", w);
+	list_for_each_entry(w, &proc->delivered_death, entry) {
+		seq_puts(m, "  has delivered dead binder\n");
+		break;
+	}
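+	/* if this proc printed nothing beyond its header, drop the header */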
+	if (!print_all && m->count == header_pos)
+		m->count = start_pos;
+}
+
+static const char *binder_return_strings[] = {
+	"BR_ERROR",
+	"BR_OK",
+	"BR_TRANSACTION",
+	"BR_REPLY",
+	"BR_ACQUIRE_RESULT",
+	"BR_DEAD_REPLY",
+	"BR_TRANSACTION_COMPLETE",
+	"BR_INCREFS",
+	"BR_ACQUIRE",
+	"BR_RELEASE",
+	"BR_DECREFS",
+	"BR_ATTEMPT_ACQUIRE",
+	"BR_NOOP",
+	"BR_SPAWN_LOOPER",
+	"BR_FINISHED",
+	"BR_DEAD_BINDER",
+	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
+	"BR_FAILED_REPLY"
+};
+
+static const char *binder_command_strings[] = {
+	"BC_TRANSACTION",
+	"BC_REPLY",
+	"BC_ACQUIRE_RESULT",
+	"BC_FREE_BUFFER",
+	"BC_INCREFS",
+	"BC_ACQUIRE",
+	"BC_RELEASE",
+	"BC_DECREFS",
+	"BC_INCREFS_DONE",
+	"BC_ACQUIRE_DONE",
+	"BC_ATTEMPT_ACQUIRE",
+	"BC_REGISTER_LOOPER",
+	"BC_ENTER_LOOPER",
+	"BC_EXIT_LOOPER",
+	"BC_REQUEST_DEATH_NOTIFICATION",
+	"BC_CLEAR_DEATH_NOTIFICATION",
+	"BC_DEAD_BINDER_DONE"
+};
+
+static const char *binder_objstat_strings[] = {
+	"proc",
+	"thread",
+	"node",
+	"ref",
+	"death",
+	"transaction",
+	"transaction_complete"
+};
+
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+			       struct binder_stats *stats)
+{
+	int i;
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+		     ARRAY_SIZE(binder_command_strings));
+	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+		if (stats->bc[i])
+			seq_printf(m, "%s%s: %d\n", prefix,
+				   binder_command_strings[i], stats->bc[i]);
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+		     ARRAY_SIZE(binder_return_strings));
+	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+		if (stats->br[i])
+			seq_printf(m, "%s%s: %d\n", prefix,
+				   binder_return_strings[i], stats->br[i]);
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+		     ARRAY_SIZE(binder_objstat_strings));
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+		     ARRAY_SIZE(stats->obj_deleted));
+	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+		if (stats->obj_created[i] || stats->obj_deleted[i])
+			seq_printf(m, "%s%s: active %d total %d\n", prefix,
+				binder_objstat_strings[i],
+				stats->obj_created[i] - stats->obj_deleted[i],
+				stats->obj_created[i]);
+	}
+}
+
+static void print_binder_proc_stats(struct seq_file *m,
+				    struct binder_proc *proc)
+{
+	struct binder_work *w;
+	struct rb_node *n;
+	int count, strong, weak;
+
+	seq_printf(m, "proc %d\n", proc->pid);
+	count = 0;
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+		count++;
+	seq_printf(m, "  threads: %d\n", count);
+	seq_printf(m, "  requested threads: %d+%d/%d\n"
+			"  ready threads %d\n"
+			"  free async space %zd\n", proc->requested_threads,
+			proc->requested_threads_started, proc->max_threads,
+			proc->ready_threads, proc->free_async_space);
+	count = 0;
+	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+		count++;
+	seq_printf(m, "  nodes: %d\n", count);
+	count = 0;
+	strong = 0;
+	weak = 0;
+	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+		struct binder_ref *ref = rb_entry(n, struct binder_ref,
+						  rb_node_desc);
+		count++;
+		strong += ref->strong;
+		weak += ref->weak;
+	}
+	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
+
+	count = 0;
+	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+		count++;
+	seq_printf(m, "  buffers: %d\n", count);
+
+	count = 0;
+	list_for_each_entry(w, &proc->todo, entry) {
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION:
+			count++;
+			break;
+		default:
+			break;
+		}
+	}
+	seq_printf(m, "  pending transactions: %d\n", count);
+
+	print_binder_stats(m, "  ", &proc->stats);
+}
+
+
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *proc;
+	struct hlist_node *pos;
+	struct binder_node *node;
+	int do_lock = !binder_debug_no_lock;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+
+	seq_puts(m, "binder state:\n");
+
+	if (!hlist_empty(&binder_dead_nodes))
+		seq_puts(m, "dead nodes:\n");
+	hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+		print_binder_node(m, node);
+
+	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+		print_binder_proc(m, proc, 1);
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+	return 0;
+}
+
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *proc;
+	struct hlist_node *pos;
+	int do_lock = !binder_debug_no_lock;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+
+	seq_puts(m, "binder stats:\n");
+
+	print_binder_stats(m, "", &binder_stats);
+
+	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+		print_binder_proc_stats(m, proc);
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+	return 0;
+}
+
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *proc;
+	struct hlist_node *pos;
+	int do_lock = !binder_debug_no_lock;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+
+	seq_puts(m, "binder transactions:\n");
+	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+		print_binder_proc(m, proc, 0);
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+	return 0;
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *proc = m->private;
+	int do_lock = !binder_debug_no_lock;
+
+	if (do_lock)
+		mutex_lock(&binder_lock);
+	seq_puts(m, "binder proc state:\n");
+	print_binder_proc(m, proc, 1);
+	if (do_lock)
+		mutex_unlock(&binder_lock);
+	return 0;
+}
+
+static void print_binder_transaction_log_entry(struct seq_file *m,
+					struct binder_transaction_log_entry *e)
+{
+	seq_printf(m,
+		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+		   e->debug_id, (e->call_type == 2) ? "reply" :
+		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
+		   e->target_handle, e->data_size, e->offsets_size);
+}
+
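+/*
+ * The transaction log is a small ring buffer; once it has wrapped, print the
+ * oldest entries (from 'next' to the end) before the newer ones.
+ */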
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+	struct binder_transaction_log *log = m->private;
+	int i;
+
+	if (log->full) {
+		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+			print_binder_transaction_log_entry(m, &log->entry[i]);
+	}
+	for (i = 0; i < log->next; i++)
+		print_binder_transaction_log_entry(m, &log->entry[i]);
+	return 0;
+}
+
+static const struct file_operations binder_fops = {
+	.owner = THIS_MODULE,
+	.poll = binder_poll,
+	.unlocked_ioctl = binder_ioctl,
+	.mmap = binder_mmap,
+	.open = binder_open,
+	.flush = binder_flush,
+	.release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "binder",
+	.fops = &binder_fops
+};
+
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+static int __init binder_init(void)
+{
+	int ret;
+
+	binder_deferred_workqueue = create_singlethread_workqueue("binder");
+	if (!binder_deferred_workqueue)
+		return -ENOMEM;
+
+	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+	if (binder_debugfs_dir_entry_root)
+		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+						 binder_debugfs_dir_entry_root);
+	ret = misc_register(&binder_miscdev);
+	if (binder_debugfs_dir_entry_root) {
+		debugfs_create_file("state",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    NULL,
+				    &binder_state_fops);
+		debugfs_create_file("stats",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    NULL,
+				    &binder_stats_fops);
+		debugfs_create_file("transactions",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    NULL,
+				    &binder_transactions_fops);
+		debugfs_create_file("transaction_log",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    &binder_transaction_log,
+				    &binder_transaction_log_fops);
+		debugfs_create_file("failed_transaction_log",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    &binder_transaction_log_failed,
+				    &binder_transaction_log_fops);
+	}
+	return ret;
+}
+
+device_initcall(binder_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
new file mode 100644
index 0000000..863ae1a
--- /dev/null
+++ b/drivers/staging/android/binder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
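+
+/*
+ * For example, BINDER_TYPE_BINDER below packs the characters 's', 'b', '*'
+ * and B_TYPE_LARGE into the single value 0x73622a85.
+ */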
+
+enum {
+	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes.  The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur.  The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+	/* 8 bytes for large_flat_header. */
+	unsigned long		type;
+	unsigned long		flags;
+
+	/* 8 bytes of data. */
+	union {
+		void		*binder;	/* local object */
+		signed long	handle;		/* remote object */
+	};
+
+	/* extra data associated with local object */
+	void			*cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver
+ * must translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+	signed long	write_size;	/* bytes to write */
+	signed long	write_consumed;	/* bytes consumed by driver */
+	unsigned long	write_buffer;
+	signed long	read_size;	/* bytes to read */
+	signed long	read_consumed;	/* bytes consumed by driver */
+	unsigned long	read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+	/* driver protocol version -- increment with incompatible change */
+	signed long	protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ   		_IOWR('b', 1, struct binder_write_read)
+#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, int64_t)
+#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, size_t)
+#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, int)
+#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, int)
+#define	BINDER_THREAD_EXIT		_IOW('b', 8, int)
+#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process.  That is, the process is being destroyed.
+ * You should handle this by exiting from your process.  Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
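+
+/*
+ * Illustrative user-space sketch only (not part of this interface; the
+ * names 'binder_fd' and 'bwr' are hypothetical caller-side variables,
+ * and the write/read buffers must be set up before the call):
+ *
+ *	struct binder_write_read bwr;
+ *	int ret;
+ *
+ *	do {
+ *		ret = ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
+ *	} while (ret < 0 && errno == EINTR);
+ *	if (ret < 0 && errno == ECONNREFUSED)
+ *		exit(0);	the driver refuses all further calls
+ */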
+
+enum transaction_flags {
+	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
+	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
+	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
+	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
+	 * identifying the target and contents of the transaction.
+	 */
+	union {
+		size_t	handle;	/* target descriptor of command transaction */
+		void	*ptr;	/* target descriptor of return transaction */
+	} target;
+	void		*cookie;	/* target object cookie */
+	unsigned int	code;		/* transaction command */
+
+	/* General information about the transaction. */
+	unsigned int	flags;
+	pid_t		sender_pid;
+	uid_t		sender_euid;
+	size_t		data_size;	/* number of bytes of data */
+	size_t		offsets_size;	/* number of bytes of offsets */
+
+	/* If this transaction is inline, the data immediately
+	 * follows here; otherwise, it ends with a pointer to
+	 * the data buffer.
+	 */
+	union {
+		struct {
+			/* transaction data */
+			const void	*buffer;
+			/* offsets from buffer to flat_binder_object structs */
+			const void	*offsets;
+		} ptr;
+		uint8_t	buf[8];
+	} data;
+};
+
+struct binder_ptr_cookie {
+	void *ptr;
+	void *cookie;
+};
+
+struct binder_pri_desc {
+	int priority;
+	int desc;
+};
+
+struct binder_pri_ptr_cookie {
+	int priority;
+	void *ptr;
+	void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+	BR_ERROR = _IOR('r', 0, int),
+	/*
+	 * int: error code
+	 */
+
+	BR_OK = _IO('r', 1),
+	/* No parameters! */
+
+	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+	/*
+	 * binder_transaction_data: the received command.
+	 */
+
+	BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+	/*
+	 * not currently supported
+	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+	 * Else the remote object has acquired a primary reference.
+	 */
+
+	BR_DEAD_REPLY = _IO('r', 5),
+	/*
+	 * The target of the last transaction (either a bcTRANSACTION or
+	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
+	 */
+
+	BR_TRANSACTION_COMPLETE = _IO('r', 6),
+	/*
+	 * No parameters... always refers to the last transaction requested
+	 * (including replies).  Note that this will be sent even for
+	 * asynchronous transactions.
+	 */
+
+	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+	/*
+	 * void *:	ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+	/*
+	 * not currently supported
+	 * int:	priority
+	 * void *: ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BR_NOOP = _IO('r', 12),
+	/*
+	 * No parameters.  Do nothing and examine the next command.  It exists
+	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+	 */
+
+	BR_SPAWN_LOOPER = _IO('r', 13),
+	/*
+	 * No parameters.  The driver has determined that a process has no
+	 * threads waiting to service incoming transactions.  When a process
+	 * receives this command, it must spawn a new service thread and
+	 * register it via bcENTER_LOOPER.
+	 */
+
+	BR_FINISHED = _IO('r', 14),
+	/*
+	 * not currently supported
+	 * stop threadpool thread
+	 */
+
+	BR_DEAD_BINDER = _IOR('r', 15, void *),
+	/*
+	 * void *: cookie
+	 */
+	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+	/*
+	 * void *: cookie
+	 */
+
+	BR_FAILED_REPLY = _IO('r', 17),
+	/*
+	 * The last transaction (either a bcTRANSACTION or
+	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+	 */
+};
+
+enum BinderDriverCommandProtocol {
+	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+	/*
+	 * binder_transaction_data: the sent command.
+	 */
+
+	BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+	/*
+	 * not currently supported
+	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+	 * Else you have acquired a primary reference on the object.
+	 */
+
+	BC_FREE_BUFFER = _IOW('c', 3, int),
+	/*
+	 * void *: ptr to transaction data received on a read
+	 */
+
+	BC_INCREFS = _IOW('c', 4, int),
+	BC_ACQUIRE = _IOW('c', 5, int),
+	BC_RELEASE = _IOW('c', 6, int),
+	BC_DECREFS = _IOW('c', 7, int),
+	/*
+	 * int:	descriptor
+	 */
+
+	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+	/*
+	 * void *: ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+	/*
+	 * not currently supported
+	 * int: priority
+	 * int: descriptor
+	 */
+
+	BC_REGISTER_LOOPER = _IO('c', 11),
+	/*
+	 * No parameters.
+	 * Register a spawned looper thread with the device.
+	 */
+
+	BC_ENTER_LOOPER = _IO('c', 12),
+	BC_EXIT_LOOPER = _IO('c', 13),
+	/*
+	 * No parameters.
+	 * These two commands are sent as an application-level thread
+	 * enters and exits the binder loop, respectively.  They are
+	 * used so the binder can have an accurate count of the number
+	 * of looping threads it has available.
+	 */
+
+	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+	/*
+	 * void *: ptr to binder
+	 * void *: cookie
+	 */
+
+	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+	/*
+	 * void *: ptr to binder
+	 * void *: cookie
+	 */
+
+	BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+	/*
+	 * void *: cookie
+	 */
+};
+
+#endif /* _LINUX_BINDER_H */
+
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
new file mode 100644
index 0000000..531bdbe
--- /dev/null
+++ b/drivers/staging/android/logger.c
@@ -0,0 +1,616 @@
+/*
+ * drivers/staging/android/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+	unsigned char		*buffer;	/* the ring buffer itself */
+	struct miscdevice	misc;	/* misc device representing the log */
+	wait_queue_head_t	wq;	/* wait queue for readers */
+	struct list_head	readers; /* this log's readers */
+	struct mutex		mutex;	/* mutex protecting buffer */
+	size_t			w_off;	/* current write head offset */
+	size_t			head;	/* new readers start here */
+	size_t			size;	/* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+	struct logger_log	*log;	/* associated log */
+	struct list_head	list;	/* entry in logger_log's list */
+	size_t			r_off;	/* current read head offset */
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized) modulus */
+#define logger_offset(n)	((n) & (log->size - 1))
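+/*
+ * Note: this assumes log->size is a power of two and that a variable named
+ * 'log' is in scope at the expansion site.  E.g. with log->size == 64*1024,
+ * logger_offset(65540) == 4.
+ */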
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 	1) Need to quickly obtain the associated log during an I/O operation
+ * 	2) Readers need to maintain state (logger_reader)
+ * 	3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = file->private_data;
+		return reader->log;
+	} else
+		return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the length of the payload of the next entry starting
+ * from 'off'.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+	__u16 val;
+
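+	/*
+	 * The 16-bit payload length can straddle the wrap point of the ring
+	 * buffer; when only one byte remains before the end (case 1 below),
+	 * reassemble it from the last byte and the first byte.
+	 */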
+	switch (log->size - off) {
+	case 1:
+		memcpy(&val, log->buffer + off, 1);
+		memcpy(((char *) &val) + 1, log->buffer, 1);
+		break;
+	default:
+		memcpy(&val, log->buffer + off, 2);
+	}
+
+	return sizeof(struct logger_entry) + val;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+				   struct logger_reader *reader,
+				   char __user *buf,
+				   size_t count)
+{
+	size_t len;
+
+	/*
+	 * We read from the log in two disjoint operations. First, we read from
+	 * the current read head offset up to 'count' bytes or to the end of
+	 * the log, whichever comes first.
+	 */
+	len = min(count, log->size - reader->r_off);
+	if (copy_to_user(buf, log->buffer + reader->r_off, len))
+		return -EFAULT;
+
+	/*
+	 * Second, we read any remaining bytes, starting back at the head of
+	 * the log.
+	 */
+	if (count != len)
+		if (copy_to_user(buf + len, log->buffer, count - len))
+			return -EFAULT;
+
+	reader->r_off = logger_offset(reader->r_off + count);
+
+	return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * 	- O_NONBLOCK works
+ * 	- If there are no log entries to read, blocks until log is written to
+ * 	- Atomically reads exactly one log entry
+ *
+ * The optimal read size is LOGGER_ENTRY_MAX_LEN. Fails with EINVAL if the
+ * read buffer is too small to hold the next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *pos)
+{
+	struct logger_reader *reader = file->private_data;
+	struct logger_log *log = reader->log;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+start:
+	while (1) {
+		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+		mutex_lock(&log->mutex);
+		ret = (log->w_off == reader->r_off);
+		mutex_unlock(&log->mutex);
+		if (!ret)
+			break;
+
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		schedule();
+	}
+
+	finish_wait(&log->wq, &wait);
+	if (ret)
+		return ret;
+
+	mutex_lock(&log->mutex);
+
+	/* is there still something to read or did we race? */
+	if (unlikely(log->w_off == reader->r_off)) {
+		mutex_unlock(&log->mutex);
+		goto start;
+	}
+
+	/* get the size of the next entry */
+	ret = get_entry_len(log, reader->r_off);
+	if (count < ret) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* get exactly one entry from the log */
+	ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+	size_t count = 0;
+
+	do {
+		size_t nr = get_entry_len(log, off);
+		off = logger_offset(off + nr);
+		count += nr;
+	} while (count < len);
+
+	return off;
+}
+
+/*
+ * clock_interval - is a < c < b in mod-space? Put another way, does the line
+ * from a to b cross c?
+ */
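+/*
+ * Example: with a == 60, b == 10 (the write wrapped) and c == 5, the write
+ * from a to b crossed c, so clock_interval(60, 10, 5) returns 1.
+ */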
+static inline int clock_interval(size_t a, size_t b, size_t c)
+{
+	if (b < a) {
+		if (a < c || b >= c)
+			return 1;
+	} else {
+		if (a < c && b >= c)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+	size_t old = log->w_off;
+	size_t new = logger_offset(old + len);
+	struct logger_reader *reader;
+
+	if (clock_interval(old, new, log->head))
+		log->head = get_next_entry(log, log->head, len);
+
+	list_for_each_entry(reader, &log->readers, list)
+		if (clock_interval(old, new, reader->r_off))
+			reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+	size_t len;
+
+	len = min(count, log->size - log->w_off);
+	memcpy(log->buffer + log->w_off, buf, len);
+
+	if (count != len)
+		memcpy(log->buffer, buf + len, count - len);
+
+	log->w_off = logger_offset(log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+				      const void __user *buf, size_t count)
+{
+	size_t len;
+
+	len = min(count, log->size - log->w_off);
+	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+		return -EFAULT;
+
+	if (count != len)
+		if (copy_from_user(log->buffer, buf + len, count - len))
+			return -EFAULT;
+
+	log->w_off = logger_offset(log->w_off + count);
+
+	return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+			 unsigned long nr_segs, loff_t ppos)
+{
+	struct logger_log *log = file_get_log(iocb->ki_filp);
+	size_t orig = log->w_off;
+	struct logger_entry header;
+	struct timespec now;
+	ssize_t ret = 0;
+
+	now = current_kernel_time();
+
+	header.pid = current->tgid;
+	header.tid = current->pid;
+	header.sec = now.tv_sec;
+	header.nsec = now.tv_nsec;
+	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+	/* null writes succeed, return zero */
+	if (unlikely(!header.len))
+		return 0;
+
+	mutex_lock(&log->mutex);
+
+	/*
+	 * Fix up any readers, pulling them forward to the first readable
+	 * entry after (what will be) the new write offset. We do this now
+	 * because if we partially fail, we can end up with clobbered log
+	 * entries that encroach on readable buffer.
+	 */
+	fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+	do_write_log(log, &header, sizeof(struct logger_entry));
+
+	while (nr_segs-- > 0) {
+		size_t len;
+		ssize_t nr;
+
+		/* figure out how much of this vector we can keep */
+		len = min_t(size_t, iov->iov_len, header.len - ret);
+
+		/* write out this segment's payload */
+		nr = do_write_log_from_user(log, iov->iov_base, len);
+		if (unlikely(nr < 0)) {
+			log->w_off = orig;
+			mutex_unlock(&log->mutex);
+			return nr;
+		}
+
+		iov++;
+		ret += nr;
+	}
+
+	mutex_unlock(&log->mutex);
+
+	/* wake up any blocked readers */
+	wake_up_interruptible(&log->wq);
+
+	return ret;
+}
+
+static struct logger_log *get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+	struct logger_log *log;
+	int ret;
+
+	ret = nonseekable_open(inode, file);
+	if (ret)
+		return ret;
+
+	log = get_log_from_minor(MINOR(inode->i_rdev));
+	if (!log)
+		return -ENODEV;
+
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader;
+
+		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+		if (!reader)
+			return -ENOMEM;
+
+		reader->log = log;
+		INIT_LIST_HEAD(&reader->list);
+
+		mutex_lock(&log->mutex);
+		reader->r_off = log->head;
+		list_add_tail(&reader->list, &log->readers);
+		mutex_unlock(&log->mutex);
+
+		file->private_data = reader;
+	} else
+		file->private_data = log;
+
+	return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = file->private_data;
+		list_del(&reader->list);
+		kfree(reader);
+	}
+
+	return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+	struct logger_reader *reader;
+	struct logger_log *log;
+	unsigned int ret = POLLOUT | POLLWRNORM;
+
+	if (!(file->f_mode & FMODE_READ))
+		return ret;
+
+	reader = file->private_data;
+	log = reader->log;
+
+	poll_wait(file, &log->wq, wait);
+
+	mutex_lock(&log->mutex);
+	if (log->w_off != reader->r_off)
+		ret |= POLLIN | POLLRDNORM;
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct logger_log *log = file_get_log(file);
+	struct logger_reader *reader;
+	long ret = -ENOTTY;
+
+	mutex_lock(&log->mutex);
+
+	switch (cmd) {
+	case LOGGER_GET_LOG_BUF_SIZE:
+		ret = log->size;
+		break;
+	case LOGGER_GET_LOG_LEN:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+		if (log->w_off >= reader->r_off)
+			ret = log->w_off - reader->r_off;
+		else
+			ret = (log->size - reader->r_off) + log->w_off;
+		break;
+	case LOGGER_GET_NEXT_ENTRY_LEN:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+		if (log->w_off != reader->r_off)
+			ret = get_entry_len(log, reader->r_off);
+		else
+			ret = 0;
+		break;
+	case LOGGER_FLUSH_LOG:
+		if (!(file->f_mode & FMODE_WRITE)) {
+			ret = -EBADF;
+			break;
+		}
+		list_for_each_entry(reader, &log->readers, list)
+			reader->r_off = log->w_off;
+		log->head = log->w_off;
+		ret = 0;
+		break;
+	}
+
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+static const struct file_operations logger_fops = {
+	.owner = THIS_MODULE,
+	.read = logger_read,
+	.aio_write = logger_aio_write,
+	.poll = logger_poll,
+	.unlocked_ioctl = logger_ioctl,
+	.compat_ioctl = logger_ioctl,
+	.open = logger_open,
+	.release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+	.buffer = _buf_ ## VAR, \
+	.misc = { \
+		.minor = MISC_DYNAMIC_MINOR, \
+		.name = NAME, \
+		.fops = &logger_fops, \
+		.parent = NULL, \
+	}, \
+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+	.readers = LIST_HEAD_INIT(VAR .readers), \
+	.mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+	.w_off = 0, \
+	.head = 0, \
+	.size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024)
+DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 64*1024)
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+	if (log_main.misc.minor == minor)
+		return &log_main;
+	if (log_events.misc.minor == minor)
+		return &log_events;
+	if (log_radio.misc.minor == minor)
+		return &log_radio;
+	if (log_system.misc.minor == minor)
+		return &log_system;
+	return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+	int ret;
+
+	ret = misc_register(&log->misc);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "logger: failed to register misc "
+		       "device for log '%s'!\n", log->misc.name);
+		return ret;
+	}
+
+	printk(KERN_INFO "logger: created %luK log '%s'\n",
+	       (unsigned long) log->size >> 10, log->misc.name);
+
+	return 0;
+}
+
+static int __init logger_init(void)
+{
+	int ret;
+
+	ret = init_log(&log_main);
+	if (unlikely(ret))
+		goto out;
+
+	ret = init_log(&log_events);
+	if (unlikely(ret))
+		goto out;
+
+	ret = init_log(&log_radio);
+	if (unlikely(ret))
+		goto out;
+
+	ret = init_log(&log_system);
+	if (unlikely(ret))
+		goto out;
+
+out:
+	return ret;
+}
+device_initcall(logger_init);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
new file mode 100644
index 0000000..2cb06e9
--- /dev/null
+++ b/drivers/staging/android/logger.h
@@ -0,0 +1,49 @@
+/* drivers/staging/android/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct logger_entry {
+	__u16		len;	/* length of the payload */
+	__u16		__pad;	/* no matter what, we get 2 bytes of padding */
+	__s32		pid;	/* generating process's pid */
+	__s32		tid;	/* generating process's tid */
+	__s32		sec;	/* seconds since Epoch */
+	__s32		nsec;	/* nanoseconds */
+	char		msg[0];	/* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */
+#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */
+#define LOGGER_LOG_SYSTEM	"log_system"	/* system/framework messages */
+#define LOGGER_LOG_MAIN		"log_main"	/* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN		(4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD	\
+	(LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+
+#define __LOGGERIO	0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE		_IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */
+
+#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
new file mode 100644
index 0000000..86d5195
--- /dev/null
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -0,0 +1,213 @@
+/* drivers/staging/android/lowmemorykiller.c
+ *
+ * The lowmemorykiller driver lets user-space specify a set of memory thresholds
+ * where processes with a range of oom_adj values will get killed. Specify the
+ * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
+ * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
+ * files take a comma-separated list of numbers in ascending order.
+ *
+ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes
+ * with a oom_adj value of 8 or higher when the free memory drops below 4096 pages
+ * and kill processes with a oom_adj value of 0 or higher when the free memory
+ * drops below 1024 pages.
+ *
+ * The driver considers memory used for caches to be free, but if a large
+ * percentage of the cached memory is locked this can be very inaccurate
+ * and processes may not get killed until the normal oom killer is triggered.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+
+static uint32_t lowmem_debug_level = 2;
+static int lowmem_adj[6] = {
+	0,
+	1,
+	6,
+	12,
+};
+static int lowmem_adj_size = 4;
+static size_t lowmem_minfree[6] = {
+	3 * 512,	/* 6MB */
+	2 * 1024,	/* 8MB */
+	4 * 1024,	/* 16MB */
+	16 * 1024,	/* 64MB */
+};
+static int lowmem_minfree_size = 4;
+
+static struct task_struct *lowmem_deathpending;
+static unsigned long lowmem_deathpending_timeout;
+
+#define lowmem_print(level, x...)			\
+	do {						\
+		if (lowmem_debug_level >= (level))	\
+			printk(x);			\
+	} while (0)
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data);
+
+static struct notifier_block task_nb = {
+	.notifier_call	= task_notify_func,
+};
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data)
+{
+	struct task_struct *task = data;
+
+	if (task == lowmem_deathpending)
+		lowmem_deathpending = NULL;
+
+	return NOTIFY_OK;
+}
+
+static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+	struct task_struct *p;
+	struct task_struct *selected = NULL;
+	int rem = 0;
+	int tasksize;
+	int i;
+	int min_adj = OOM_ADJUST_MAX + 1;
+	int selected_tasksize = 0;
+	int selected_oom_adj;
+	int array_size = ARRAY_SIZE(lowmem_adj);
+	int other_free = global_page_state(NR_FREE_PAGES);
+	int other_file = global_page_state(NR_FILE_PAGES) -
+						global_page_state(NR_SHMEM);
+
+	/*
+	 * If we already have a death outstanding, bail out right away,
+	 * indicating to vmscan that we have nothing further to offer on
+	 * this pass.
+	 */
+	if (lowmem_deathpending &&
+	    time_before_eq(jiffies, lowmem_deathpending_timeout))
+		return 0;
+
+	if (lowmem_adj_size < array_size)
+		array_size = lowmem_adj_size;
+	if (lowmem_minfree_size < array_size)
+		array_size = lowmem_minfree_size;
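+	/* pick the lowest oom_adj threshold whose free-memory level was crossed */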
+	for (i = 0; i < array_size; i++) {
+		if (other_free < lowmem_minfree[i] &&
+		    other_file < lowmem_minfree[i]) {
+			min_adj = lowmem_adj[i];
+			break;
+		}
+	}
+	if (sc->nr_to_scan > 0)
+		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
+			     sc->nr_to_scan, sc->gfp_mask, other_free, other_file,
+			     min_adj);
+	rem = global_page_state(NR_ACTIVE_ANON) +
+		global_page_state(NR_ACTIVE_FILE) +
+		global_page_state(NR_INACTIVE_ANON) +
+		global_page_state(NR_INACTIVE_FILE);
+	if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
+		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
+			     sc->nr_to_scan, sc->gfp_mask, rem);
+		return rem;
+	}
+	selected_oom_adj = min_adj;
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		struct mm_struct *mm;
+		struct signal_struct *sig;
+		int oom_adj;
+
+		task_lock(p);
+		mm = p->mm;
+		sig = p->signal;
+		if (!mm || !sig) {
+			task_unlock(p);
+			continue;
+		}
+		oom_adj = sig->oom_adj;
+		if (oom_adj < min_adj) {
+			task_unlock(p);
+			continue;
+		}
+		tasksize = get_mm_rss(mm);
+		task_unlock(p);
+		if (tasksize <= 0)
+			continue;
+		if (selected) {
+			if (oom_adj < selected_oom_adj)
+				continue;
+			if (oom_adj == selected_oom_adj &&
+			    tasksize <= selected_tasksize)
+				continue;
+		}
+		selected = p;
+		selected_tasksize = tasksize;
+		selected_oom_adj = oom_adj;
+		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+			     p->pid, p->comm, oom_adj, tasksize);
+	}
+	if (selected) {
+		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+			     selected->pid, selected->comm,
+			     selected_oom_adj, selected_tasksize);
+		lowmem_deathpending = selected;
+		lowmem_deathpending_timeout = jiffies + HZ;
+		force_sig(SIGKILL, selected);
+		rem -= selected_tasksize;
+	}
+	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+		     sc->nr_to_scan, sc->gfp_mask, rem);
+	read_unlock(&tasklist_lock);
+	return rem;
+}
+
+static struct shrinker lowmem_shrinker = {
+	.shrink = lowmem_shrink,
+	.seeks = DEFAULT_SEEKS * 16
+};
+
+static int __init lowmem_init(void)
+{
+	task_free_register(&task_nb);
+	register_shrinker(&lowmem_shrinker);
+	return 0;
+}
+
+static void __exit lowmem_exit(void)
+{
+	unregister_shrinker(&lowmem_shrinker);
+	task_free_unregister(&task_nb);
+}
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
+			 S_IRUGO | S_IWUSR);
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
+			 S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
new file mode 100644
index 0000000..53f736b
--- /dev/null
+++ b/drivers/staging/android/ram_console.c
@@ -0,0 +1,418 @@
+/* drivers/staging/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+#include <linux/rslib.h>
+#endif
+
+struct ram_console_buffer {
+	uint32_t    sig;
+	uint32_t    start;
+	uint32_t    size;
+	uint8_t     data[0];
+};
+
+#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static char __initdata
+	ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
+#endif
+static char *ram_console_old_log;
+static size_t ram_console_old_log_size;
+
+static struct ram_console_buffer *ram_console_buffer;
+static size_t ram_console_buffer_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static char *ram_console_par_buffer;
+static struct rs_control *ram_console_rs_decoder;
+static int ram_console_corrected_bytes;
+static int ram_console_bad_blocks;
+#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+#endif
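+
+/*
+ * Layout of ram_console_par_buffer: one ECC_SIZE parity block for every
+ * ECC_BLOCK_SIZE bytes of log data, followed by one extra parity block that
+ * covers the struct ram_console_buffer header itself.
+ */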
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
+{
+	int i;
+	uint16_t par[ECC_SIZE];
+	/* Initialize the parity buffer */
+	memset(par, 0, sizeof(par));
+	encode_rs8(ram_console_rs_decoder, data, len, par, 0);
+	for (i = 0; i < ECC_SIZE; i++)
+		ecc[i] = par[i];
+}
+
+static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
+{
+	int i;
+	uint16_t par[ECC_SIZE];
+	for (i = 0; i < ECC_SIZE; i++)
+		par[i] = ecc[i];
+	return decode_rs8(ram_console_rs_decoder, data, par, len,
+				NULL, 0, NULL, 0, NULL);
+}
+#endif
+
+static void ram_console_update(const char *s, unsigned int count)
+{
+	struct ram_console_buffer *buffer = ram_console_buffer;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
+	uint8_t *block;
+	uint8_t *par;
+	int size = ECC_BLOCK_SIZE;
+#endif
+	memcpy(buffer->data + buffer->start, s, count);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
+	par = ram_console_par_buffer +
+	      (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
+	do {
+		if (block + ECC_BLOCK_SIZE > buffer_end)
+			size = buffer_end - block;
+		ram_console_encode_rs8(block, size, par);
+		block += ECC_BLOCK_SIZE;
+		par += ECC_SIZE;
+	} while (block < buffer->data + buffer->start + count);
+#endif
+}
+
+static void ram_console_update_header(void)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	struct ram_console_buffer *buffer = ram_console_buffer;
+	uint8_t *par;
+	par = ram_console_par_buffer +
+	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+	ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
+#endif
+}
+
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+	int rem;
+	struct ram_console_buffer *buffer = ram_console_buffer;
+
+	if (count > ram_console_buffer_size) {
+		s += count - ram_console_buffer_size;
+		count = ram_console_buffer_size;
+	}
+	rem = ram_console_buffer_size - buffer->start;
+	if (rem < count) {
+		ram_console_update(s, rem);
+		s += rem;
+		count -= rem;
+		buffer->start = 0;
+		buffer->size = ram_console_buffer_size;
+	}
+	ram_console_update(s, count);
+
+	buffer->start += count;
+	if (buffer->size < ram_console_buffer_size)
+		buffer->size += count;
+	ram_console_update_header();
+}
+
+static struct console ram_console = {
+	.name	= "ram",
+	.write	= ram_console_write,
+	.flags	= CON_PRINTBUFFER | CON_ENABLED,
+	.index	= -1,
+};
+
+void ram_console_enable_console(int enabled)
+{
+	if (enabled)
+		ram_console.flags |= CON_ENABLED;
+	else
+		ram_console.flags &= ~CON_ENABLED;
+}
+
+static void __init
+ram_console_save_old(struct ram_console_buffer *buffer, char *dest)
+{
+	size_t old_log_size = buffer->size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	uint8_t *block;
+	uint8_t *par;
+	char strbuf[80];
+	int strbuf_len;
+
+	block = buffer->data;
+	par = ram_console_par_buffer;
+	while (block < buffer->data + buffer->size) {
+		int numerr;
+		int size = ECC_BLOCK_SIZE;
+		if (block + size > buffer->data + ram_console_buffer_size)
+			size = buffer->data + ram_console_buffer_size - block;
+		numerr = ram_console_decode_rs8(block, size, par);
+		if (numerr > 0) {
+#if 0
+			printk(KERN_INFO "ram_console: error in block %p, %d\n",
+			       block, numerr);
+#endif
+			ram_console_corrected_bytes += numerr;
+		} else if (numerr < 0) {
+#if 0
+			printk(KERN_INFO "ram_console: uncorrectable error in "
+			       "block %p\n", block);
+#endif
+			ram_console_bad_blocks++;
+		}
+		block += ECC_BLOCK_SIZE;
+		par += ECC_SIZE;
+	}
+	if (ram_console_corrected_bytes || ram_console_bad_blocks)
+		strbuf_len = snprintf(strbuf, sizeof(strbuf),
+			"\n%d Corrected bytes, %d unrecoverable blocks\n",
+			ram_console_corrected_bytes, ram_console_bad_blocks);
+	else
+		strbuf_len = snprintf(strbuf, sizeof(strbuf),
+				      "\nNo errors detected\n");
+	if (strbuf_len >= sizeof(strbuf))
+		strbuf_len = sizeof(strbuf) - 1;
+	old_log_size += strbuf_len;
+#endif
+
+	if (dest == NULL) {
+		dest = kmalloc(old_log_size, GFP_KERNEL);
+		if (dest == NULL) {
+			printk(KERN_ERR
+			       "ram_console: failed to allocate buffer\n");
+			return;
+		}
+	}
+
+	ram_console_old_log = dest;
+	ram_console_old_log_size = old_log_size;
+	memcpy(ram_console_old_log,
+	       &buffer->data[buffer->start], buffer->size - buffer->start);
+	memcpy(ram_console_old_log + buffer->size - buffer->start,
+	       &buffer->data[0], buffer->start);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	memcpy(ram_console_old_log + old_log_size - strbuf_len,
+	       strbuf, strbuf_len);
+#endif
+}
+
+static int __init ram_console_init(struct ram_console_buffer *buffer,
+				   size_t buffer_size, char *old_buf)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	int numerr;
+	uint8_t *par;
+#endif
+	ram_console_buffer = buffer;
+	ram_console_buffer_size =
+		buffer_size - sizeof(struct ram_console_buffer);
+
+	if (ram_console_buffer_size > buffer_size) {
+		pr_err("ram_console: buffer %p, invalid size %zu, "
+		       "datasize %zu\n", buffer, buffer_size,
+		       ram_console_buffer_size);
+		return 0;
+	}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+	ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
+						ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+
+	if (ram_console_buffer_size > buffer_size) {
+		pr_err("ram_console: buffer %p, invalid size %zu, "
+		       "non-ecc datasize %zu\n",
+		       buffer, buffer_size, ram_console_buffer_size);
+		return 0;
+	}
+
+	ram_console_par_buffer = buffer->data + ram_console_buffer_size;
+
+	/*
+	 * first consecutive root is 0
+	 * primitive element to generate roots = 1
+	 */
+	ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
+	if (ram_console_rs_decoder == NULL) {
+		printk(KERN_INFO "ram_console: init_rs failed\n");
+		return 0;
+	}
+
+	ram_console_corrected_bytes = 0;
+	ram_console_bad_blocks = 0;
+
+	par = ram_console_par_buffer +
+	      DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+
+	numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
+	if (numerr > 0) {
+		printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
+		ram_console_corrected_bytes += numerr;
+	} else if (numerr < 0) {
+		printk(KERN_INFO
+		       "ram_console: uncorrectable error in header\n");
+		ram_console_bad_blocks++;
+	}
+#endif
+
+	if (buffer->sig == RAM_CONSOLE_SIG) {
+		if (buffer->size > ram_console_buffer_size
+		    || buffer->start > buffer->size)
+			printk(KERN_INFO "ram_console: found existing invalid "
+			       "buffer, size %d, start %d\n",
+			       buffer->size, buffer->start);
+		else {
+			printk(KERN_INFO "ram_console: found existing buffer, "
+			       "size %d, start %d\n",
+			       buffer->size, buffer->start);
+			ram_console_save_old(buffer, old_buf);
+		}
+	} else {
+		printk(KERN_INFO "ram_console: no valid data in buffer "
+		       "(sig = 0x%08x)\n", buffer->sig);
+	}
+
+	buffer->sig = RAM_CONSOLE_SIG;
+	buffer->start = 0;
+	buffer->size = 0;
+
+	register_console(&ram_console);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+	console_verbose();
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static int __init ram_console_early_init(void)
+{
+	return ram_console_init((struct ram_console_buffer *)
+		CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
+		CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
+		ram_console_old_log_init_buffer);
+}
+#else
+static int ram_console_driver_probe(struct platform_device *pdev)
+{
+	struct resource *res = pdev->resource;
+	size_t start;
+	size_t buffer_size;
+	void *buffer;
+
+	if (res == NULL || pdev->num_resources != 1 ||
+	    !(res->flags & IORESOURCE_MEM)) {
+		printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
+		       "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
+		return -ENXIO;
+	}
+	buffer_size = res->end - res->start + 1;
+	start = res->start;
+	printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
+	       start, buffer_size);
+	buffer = ioremap(res->start, buffer_size);
+	if (buffer == NULL) {
+		printk(KERN_ERR "ram_console: failed to map memory\n");
+		return -ENOMEM;
+	}
+
+	return ram_console_init(buffer, buffer_size, NULL/* allocate */);
+}
+
+static struct platform_driver ram_console_driver = {
+	.probe = ram_console_driver_probe,
+	.driver		= {
+		.name	= "ram_console",
+	},
+};
+
+static int __init ram_console_module_init(void)
+{
+	int err;
+	err = platform_driver_register(&ram_console_driver);
+	return err;
+}
+#endif
+
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+				    size_t len, loff_t *offset)
+{
+	loff_t pos = *offset;
+	ssize_t count;
+
+	if (pos >= ram_console_old_log_size)
+		return 0;
+
+	count = min(len, (size_t)(ram_console_old_log_size - pos));
+	if (copy_to_user(buf, ram_console_old_log + pos, count))
+		return -EFAULT;
+
+	*offset += count;
+	return count;
+}
+
+static const struct file_operations ram_console_file_ops = {
+	.owner = THIS_MODULE,
+	.read = ram_console_read_old,
+};
+
+static int __init ram_console_late_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	if (ram_console_old_log == NULL)
+		return 0;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
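+	/* move the saved log into a kmalloc'd buffer before exposing it via procfs */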
+	ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+	if (ram_console_old_log == NULL) {
+		printk(KERN_ERR
+		       "ram_console: failed to allocate buffer for old log\n");
+		ram_console_old_log_size = 0;
+		return 0;
+	}
+	memcpy(ram_console_old_log,
+	       ram_console_old_log_init_buffer, ram_console_old_log_size);
+#endif
+	entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+	if (!entry) {
+		printk(KERN_ERR "ram_console: failed to create proc entry\n");
+		kfree(ram_console_old_log);
+		ram_console_old_log = NULL;
+		return 0;
+	}
+
+	entry->proc_fops = &ram_console_file_ops;
+	entry->size = ram_console_old_log_size;
+	return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+console_initcall(ram_console_early_init);
+#else
+postcore_initcall(ram_console_module_init);
+#endif
+late_initcall(ram_console_late_init);
+
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
new file mode 100644
index 0000000..a64481c
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.c
@@ -0,0 +1,176 @@
+/* drivers/staging/android/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+	struct timed_output_dev dev;
+	struct hrtimer timer;
+	spinlock_t lock;
+	unsigned	gpio;
+	int		max_timeout;
+	u8		active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+	struct timed_gpio_data *data =
+		container_of(timer, struct timed_gpio_data, timer);
+
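+	/* timer expired: return the GPIO to its inactive level */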
+	gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+	return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+	struct timed_gpio_data	*data =
+		container_of(dev, struct timed_gpio_data, dev);
+
+	if (hrtimer_active(&data->timer)) {
+		ktime_t r = hrtimer_get_remaining(&data->timer);
+		struct timeval t = ktime_to_timeval(r);
+		return t.tv_sec * 1000 + t.tv_usec / 1000;
+	}
+
+	return 0;
+}
+
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+	struct timed_gpio_data	*data =
+		container_of(dev, struct timed_gpio_data, dev);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&data->lock, flags);
+
+	/* cancel previous timer and set GPIO according to value */
+	hrtimer_cancel(&data->timer);
+	gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
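+	/* a positive value (in milliseconds) arms a one-shot timer, clamped to max_timeout */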
+	if (value > 0) {
+		if (value > data->max_timeout)
+			value = data->max_timeout;
+
+		hrtimer_start(&data->timer,
+			ktime_set(value / 1000, (value % 1000) * 1000000),
+			HRTIMER_MODE_REL);
+	}
+
+	spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct timed_gpio *cur_gpio;
+	struct timed_gpio_data *gpio_data, *gpio_dat;
+	int i, j, ret = 0;
+
+	if (!pdata)
+		return -EBUSY;
+
+	gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
+			GFP_KERNEL);
+	if (!gpio_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pdata->num_gpios; i++) {
+		cur_gpio = &pdata->gpios[i];
+		gpio_dat = &gpio_data[i];
+
+		hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+				HRTIMER_MODE_REL);
+		gpio_dat->timer.function = gpio_timer_func;
+		spin_lock_init(&gpio_dat->lock);
+
+		gpio_dat->dev.name = cur_gpio->name;
+		gpio_dat->dev.get_time = gpio_get_time;
+		gpio_dat->dev.enable = gpio_enable;
+		ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+		if (ret >= 0) {
+			ret = timed_output_dev_register(&gpio_dat->dev);
+			if (ret < 0)
+				gpio_free(cur_gpio->gpio);
+		}
+		if (ret < 0) {
+			for (j = 0; j < i; j++) {
+				timed_output_dev_unregister(&gpio_data[j].dev);
+				gpio_free(gpio_data[j].gpio);
+			}
+			kfree(gpio_data);
+			return ret;
+		}
+
+		gpio_dat->gpio = cur_gpio->gpio;
+		gpio_dat->max_timeout = cur_gpio->max_timeout;
+		gpio_dat->active_low = cur_gpio->active_low;
+		gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+	}
+
+	platform_set_drvdata(pdev, gpio_data);
+
+	return 0;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < pdata->num_gpios; i++) {
+		timed_output_dev_unregister(&gpio_data[i].dev);
+		gpio_free(gpio_data[i].gpio);
+	}
+
+	kfree(gpio_data);
+
+	return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+	.probe		= timed_gpio_probe,
+	.remove		= timed_gpio_remove,
+	.driver		= {
+		.name		= TIMED_GPIO_NAME,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init timed_gpio_init(void)
+{
+	return platform_driver_register(&timed_gpio_driver);
+}
+
+static void __exit timed_gpio_exit(void)
+{
+	platform_driver_unregister(&timed_gpio_driver);
+}
+
+module_init(timed_gpio_init);
+module_exit(timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
new file mode 100644
index 0000000..a0e15f8
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.h
@@ -0,0 +1,33 @@
+/* drivers/staging/android/timed_gpio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
+
+#define TIMED_GPIO_NAME "timed-gpio"
+
+struct timed_gpio {
+	const char *name;
+	unsigned	gpio;
+	int		max_timeout;
+	u8		active_low;
+};
+
+struct timed_gpio_platform_data {
+	int		num_gpios;
+	struct timed_gpio *gpios;
+};
+
+#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
new file mode 100644
index 0000000..f373422
--- /dev/null
+++ b/drivers/staging/android/timed_output.c
@@ -0,0 +1,123 @@
+/* drivers/staging/android/timed_output.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include "timed_output.h"
+
+static struct class *timed_output_class;
+static atomic_t device_count;
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct timed_output_dev *tdev = dev_get_drvdata(dev);
+	int remaining = tdev->get_time(tdev);
+
+	return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t enable_store(
+		struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct timed_output_dev *tdev = dev_get_drvdata(dev);
+	int value;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+
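+	/* pass the requested timeout (in milliseconds) to the underlying driver; 0 turns the output off */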
+	tdev->enable(tdev, value);
+
+	return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+
+static int create_timed_output_class(void)
+{
+	if (!timed_output_class) {
+		timed_output_class = class_create(THIS_MODULE, "timed_output");
+		if (IS_ERR(timed_output_class))
+			return PTR_ERR(timed_output_class);
+		atomic_set(&device_count, 0);
+	}
+
+	return 0;
+}
+
+int timed_output_dev_register(struct timed_output_dev *tdev)
+{
+	int ret;
+
+	if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
+		return -EINVAL;
+
+	ret = create_timed_output_class();
+	if (ret < 0)
+		return ret;
+
+	tdev->index = atomic_inc_return(&device_count);
+	tdev->dev = device_create(timed_output_class, NULL,
+		MKDEV(0, tdev->index), NULL, tdev->name);
+	if (IS_ERR(tdev->dev))
+		return PTR_ERR(tdev->dev);
+
+	ret = device_create_file(tdev->dev, &dev_attr_enable);
+	if (ret < 0)
+		goto err_create_file;
+
+	dev_set_drvdata(tdev->dev, tdev);
+	tdev->state = 0;
+	return 0;
+
+err_create_file:
+	device_destroy(timed_output_class, MKDEV(0, tdev->index));
+	printk(KERN_ERR "timed_output: Failed to register driver %s\n",
+			tdev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_register);
+
+void timed_output_dev_unregister(struct timed_output_dev *tdev)
+{
+	device_remove_file(tdev->dev, &dev_attr_enable);
+	device_destroy(timed_output_class, MKDEV(0, tdev->index));
+	dev_set_drvdata(tdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
+
+static int __init timed_output_init(void)
+{
+	return create_timed_output_class();
+}
+
+static void __exit timed_output_exit(void)
+{
+	class_destroy(timed_output_class);
+}
+
+module_init(timed_output_init);
+module_exit(timed_output_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed output class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
new file mode 100644
index 0000000..ec907ab
--- /dev/null
+++ b/drivers/staging/android/timed_output.h
@@ -0,0 +1,37 @@
+/* drivers/staging/android/timed_output.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_OUTPUT_H
+#define _LINUX_TIMED_OUTPUT_H
+
+struct timed_output_dev {
+	const char	*name;
+
+	/* enable the output and set the timer */
+	void	(*enable)(struct timed_output_dev *sdev, int timeout);
+
+	/* returns the current number of milliseconds remaining on the timer */
+	int		(*get_time)(struct timed_output_dev *sdev);
+
+	/* private data */
+	struct device	*dev;
+	int		index;
+	int		state;
+};
+
+extern int timed_output_dev_register(struct timed_output_dev *dev);
+extern void timed_output_dev_unregister(struct timed_output_dev *dev);
+
+#endif
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644
index 0000000..5238591
--- /dev/null
+++ b/drivers/switch/Kconfig
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+	tristate "Switch class support"
+	help
+	  Say Y here to enable switch class support. This allows
+	  userspace to monitor switch state changes via sysfs and uevents.
+
+if SWITCH
+
+config SWITCH_GPIO
+	tristate "GPIO Switch support"
+	depends on GENERIC_GPIO
+	help
+	  Say Y here to enable GPIO-based switch support.
+
+endif # SWITCH
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644
index 0000000..f7606ed
--- /dev/null
+++ b/drivers/switch/Makefile
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH)		+= switch_class.o
+obj-$(CONFIG_SWITCH_GPIO)	+= switch_gpio.o
+
diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c
new file mode 100644
index 0000000..e05fc25
--- /dev/null
+++ b/drivers/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ *  drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct switch_dev *sdev = (struct switch_dev *)
+		dev_get_drvdata(dev);
+
+	if (sdev->print_state) {
+		int ret = sdev->print_state(sdev, buf);
+		if (ret >= 0)
+			return ret;
+	}
+	return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct switch_dev *sdev = (struct switch_dev *)
+		dev_get_drvdata(dev);
+
+	if (sdev->print_name) {
+		int ret = sdev->print_name(sdev, buf);
+		if (ret >= 0)
+			return ret;
+	}
+	return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+	char name_buf[120];
+	char state_buf[120];
+	char *prop_buf;
+	char *envp[3];
+	int env_offset = 0;
+	int length;
+
+	if (sdev->state != state) {
+		sdev->state = state;
+
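+		/* notify userspace of the change via a uevent carrying SWITCH_NAME and SWITCH_STATE */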
+		prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+		if (prop_buf) {
+			length = name_show(sdev->dev, NULL, prop_buf);
+			if (length > 0) {
+				if (prop_buf[length - 1] == '\n')
+					prop_buf[length - 1] = 0;
+				snprintf(name_buf, sizeof(name_buf),
+					"SWITCH_NAME=%s", prop_buf);
+				envp[env_offset++] = name_buf;
+			}
+			length = state_show(sdev->dev, NULL, prop_buf);
+			if (length > 0) {
+				if (prop_buf[length - 1] == '\n')
+					prop_buf[length - 1] = 0;
+				snprintf(state_buf, sizeof(state_buf),
+					"SWITCH_STATE=%s", prop_buf);
+				envp[env_offset++] = state_buf;
+			}
+			envp[env_offset] = NULL;
+			kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+			free_page((unsigned long)prop_buf);
+		} else {
+			printk(KERN_ERR "out of memory in switch_set_state\n");
+			kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+	if (!switch_class) {
+		switch_class = class_create(THIS_MODULE, "switch");
+		if (IS_ERR(switch_class))
+			return PTR_ERR(switch_class);
+		atomic_set(&device_count, 0);
+	}
+
+	return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+	int ret;
+
+	if (!switch_class) {
+		ret = create_switch_class();
+		if (ret < 0)
+			return ret;
+	}
+
+	sdev->index = atomic_inc_return(&device_count);
+	sdev->dev = device_create(switch_class, NULL,
+		MKDEV(0, sdev->index), NULL, sdev->name);
+	if (IS_ERR(sdev->dev))
+		return PTR_ERR(sdev->dev);
+
+	ret = device_create_file(sdev->dev, &dev_attr_state);
+	if (ret < 0)
+		goto err_create_file_1;
+	ret = device_create_file(sdev->dev, &dev_attr_name);
+	if (ret < 0)
+		goto err_create_file_2;
+
+	dev_set_drvdata(sdev->dev, sdev);
+	sdev->state = 0;
+	return 0;
+
+err_create_file_2:
+	device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+	device_destroy(switch_class, MKDEV(0, sdev->index));
+	printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+	device_remove_file(sdev->dev, &dev_attr_name);
+	device_remove_file(sdev->dev, &dev_attr_state);
+	device_destroy(switch_class, MKDEV(0, sdev->index));
+	dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+	return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+	class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c
new file mode 100644
index 0000000..7e9faa2
--- /dev/null
+++ b/drivers/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ *  drivers/switch/switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct gpio_switch_data {
+	struct switch_dev sdev;
+	unsigned gpio;
+	const char *name_on;
+	const char *name_off;
+	const char *state_on;
+	const char *state_off;
+	int irq;
+	struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+	int state;
+	struct gpio_switch_data	*data =
+		container_of(work, struct gpio_switch_data, work);
+
+	state = gpio_get_value(data->gpio);
+	switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_switch_data *switch_data =
+	    (struct gpio_switch_data *)dev_id;
+
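+	/* defer to process context: switch_set_state() sends a uevent and may sleep */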
+	schedule_work(&switch_data->work);
+	return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+	struct gpio_switch_data	*switch_data =
+		container_of(sdev, struct gpio_switch_data, sdev);
+	const char *state;
+	if (switch_get_state(sdev))
+		state = switch_data->state_on;
+	else
+		state = switch_data->state_off;
+
+	if (state)
+		return sprintf(buf, "%s\n", state);
+	return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+	struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+	struct gpio_switch_data *switch_data;
+	int ret = 0;
+
+	if (!pdata)
+		return -EBUSY;
+
+	switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+	if (!switch_data)
+		return -ENOMEM;
+
+	switch_data->sdev.name = pdata->name;
+	switch_data->gpio = pdata->gpio;
+	switch_data->name_on = pdata->name_on;
+	switch_data->name_off = pdata->name_off;
+	switch_data->state_on = pdata->state_on;
+	switch_data->state_off = pdata->state_off;
+	switch_data->sdev.print_state = switch_gpio_print_state;
+
+	ret = switch_dev_register(&switch_data->sdev);
+	if (ret < 0)
+		goto err_switch_dev_register;
+
+	ret = gpio_request(switch_data->gpio, pdev->name);
+	if (ret < 0)
+		goto err_request_gpio;
+
+	ret = gpio_direction_input(switch_data->gpio);
+	if (ret < 0)
+		goto err_set_gpio_input;
+
+	INIT_WORK(&switch_data->work, gpio_switch_work);
+
+	switch_data->irq = gpio_to_irq(switch_data->gpio);
+	if (switch_data->irq < 0) {
+		ret = switch_data->irq;
+		goto err_detect_irq_num_failed;
+	}
+
+	ret = request_irq(switch_data->irq, gpio_irq_handler,
+			  IRQF_TRIGGER_LOW, pdev->name, switch_data);
+	if (ret < 0)
+		goto err_request_irq;
+
+	/* Perform initial detection */
+	gpio_switch_work(&switch_data->work);
+
+	platform_set_drvdata(pdev, switch_data);
+
+	return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+	gpio_free(switch_data->gpio);
+err_request_gpio:
+	switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+	kfree(switch_data);
+
+	return ret;
+}
+
+static int __devexit gpio_switch_remove(struct platform_device *pdev)
+{
+	struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+	cancel_work_sync(&switch_data->work);
+	gpio_free(switch_data->gpio);
+	switch_dev_unregister(&switch_data->sdev);
+	kfree(switch_data);
+
+	return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+	.probe		= gpio_switch_probe,
+	.remove		= __devexit_p(gpio_switch_remove),
+	.driver		= {
+		.name	= "switch-gpio",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init gpio_switch_init(void)
+{
+	return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+	platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index db7912c..62ba22f 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -91,6 +91,9 @@
 	struct uart_state *state = tty->driver_data;
 	struct uart_port *port = state->uart_port;
 
+	if (port->ops->wake_peer)
+		port->ops->wake_peer(port);
+
 	if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
 	    !tty->stopped && !tty->hw_stopped)
 		port->ops->start_tx(port);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 029e288..144a8c8 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -935,6 +935,14 @@
 	  For more information, see Documentation/usb/gadget_printer.txt
 	  which includes sample code for accessing the device file.
 
+config USB_G_ANDROID
+	boolean "Android Gadget"
+	depends on SWITCH
+	help
+	  The Android gadget driver supports multiple USB functions.
+	  The functions can be configured via a board file and may be
+	  enabled and disabled dynamically.
+
 config USB_CDC_COMPOSITE
 	tristate "CDC Composite Device (Ethernet and ACM)"
 	depends on NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 4fe92b1..ab17a4c 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -49,6 +49,7 @@
 g_nokia-y			:= nokia.o
 g_webcam-y			:= webcam.o
 g_ncm-y				:= ncm.o
+g_android-y			:= android.o
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
 obj-$(CONFIG_USB_AUDIO)		+= g_audio.o
@@ -67,3 +68,4 @@
 obj-$(CONFIG_USB_G_NOKIA)	+= g_nokia.o
 obj-$(CONFIG_USB_G_WEBCAM)	+= g_webcam.o
 obj-$(CONFIG_USB_G_NCM)		+= g_ncm.o
+obj-$(CONFIG_USB_G_ANDROID)	+= g_android.o
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
new file mode 100644
index 0000000..ebe6766
--- /dev/null
+++ b/drivers/usb/gadget/android.c
@@ -0,0 +1,1150 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "composite.c"
+
+#include "f_mass_storage.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_adb.c"
+#include "f_mtp.c"
+#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID		0x18D1
+#define PRODUCT_ID		0x0001
+
+struct android_usb_function {
+	char *name;
+	void *config;
+
+	struct device *dev;
+	char *dev_name;
+	struct device_attribute **attributes;
+
+	/* for android_dev.enabled_functions */
+	struct list_head enabled_list;
+
+	/* Optional: initialization during gadget bind */
+	int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+	/* Optional: cleanup during gadget unbind */
+	void (*cleanup)(struct android_usb_function *);
+
+	int (*bind_config)(struct android_usb_function *, struct usb_configuration *);
+
+	/* Optional: called when the configuration is removed */
+	void (*unbind_config)(struct android_usb_function *, struct usb_configuration *);
+	/* Optional: handle ctrl requests before the device is configured
+	 *	and/or before the function is enabled */
+	int (*ctrlrequest)(struct android_usb_function *,
+					struct usb_composite_dev *,
+					const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+	struct android_usb_function **functions;
+	struct list_head enabled_functions;
+	struct usb_composite_dev *cdev;
+	struct device *dev;
+
+	bool enabled;
+	bool connected;
+	bool sw_connected;
+	struct work_struct work;
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_SERIAL_IDX		2
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer_string,
+	[STRING_PRODUCT_IDX].s = product_string,
+	[STRING_SERIAL_IDX].s = serial_string,
+	{  }			/* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+	.bLength              = sizeof(device_desc),
+	.bDescriptorType      = USB_DT_DEVICE,
+	.bcdUSB               = __constant_cpu_to_le16(0x0200),
+	.bDeviceClass         = USB_CLASS_PER_INTERFACE,
+	.idVendor             = __constant_cpu_to_le16(VENDOR_ID),
+	.idProduct            = __constant_cpu_to_le16(PRODUCT_ID),
+	.bcdDevice            = __constant_cpu_to_le16(0xffff),
+	.bNumConfigurations   = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+	.label		= "android",
+	.unbind		= android_unbind_config,
+	.bConfigurationValue = 1,
+	.bmAttributes	= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bMaxPower	= 0xFA, /* 500 mA */
+};
+
+static void android_work(struct work_struct *data)
+{
+	struct android_dev *dev = container_of(data, struct android_dev, work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+	unsigned long flags;
+
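+	/* report the most specific USB state to userspace as a uevent */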
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config) {
+		spin_unlock_irqrestore(&cdev->lock, flags);
+		kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE,
+							configured);
+		return;
+	}
+	if (dev->connected != dev->sw_connected) {
+		dev->sw_connected = dev->connected;
+		spin_unlock_irqrestore(&cdev->lock, flags);
+		kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE,
+				dev->sw_connected ? connected : disconnected);
+	} else {
+		spin_unlock_irqrestore(&cdev->lock, flags);
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+static int adb_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+	return adb_setup();
+}
+
+static void adb_function_cleanup(struct android_usb_function *f)
+{
+	adb_cleanup();
+}
+
+static int adb_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+	return adb_bind_config(c);
+}
+
+static struct android_usb_function adb_function = {
+	.name		= "adb",
+	.init		= adb_function_init,
+	.cleanup	= adb_function_cleanup,
+	.bind_config	= adb_function_bind_config,
+};
+
+
+#define MAX_ACM_INSTANCES 4
+struct acm_function_config {
+	int instances;
+};
+
+static int acm_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return gserial_setup(cdev->gadget, MAX_ACM_INSTANCES);
+}
+
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+	gserial_cleanup();
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int acm_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+	int i;
+	int ret = 0;
+	struct acm_function_config *config = f->config;
+
+	for (i = 0; i < config->instances; i++) {
+		ret = acm_bind_config(c, i);
+		if (ret) {
+			pr_err("Could not bind acm%u config\n", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static ssize_t acm_instances_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct acm_function_config *config = f->config;
+	return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t acm_instances_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct acm_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if (value > MAX_ACM_INSTANCES)
+		value = MAX_ACM_INSTANCES;
+	config->instances = value;
+	return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show, acm_instances_store);
+static struct device_attribute *acm_function_attributes[] = { &dev_attr_instances, NULL };
+
+static struct android_usb_function acm_function = {
+	.name		= "acm",
+	.init		= acm_function_init,
+	.cleanup	= acm_function_cleanup,
+	.bind_config	= acm_function_bind_config,
+	.attributes	= acm_function_attributes,
+};
+
+
+static int mtp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+	return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+	mtp_cleanup();
+}
+
+static int mtp_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+	return mtp_bind_config(c, false);
+}
+
+static int ptp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+	/* nothing to do - initialization is handled by mtp_function_init */
+	return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+	/* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+static int ptp_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
+{
+	return mtp_bind_config(c, true);
+}
+
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+						struct usb_composite_dev *cdev,
+						const struct usb_ctrlrequest *c)
+{
+	return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+	.name		= "mtp",
+	.init		= mtp_function_init,
+	.cleanup	= mtp_function_cleanup,
+	.bind_config	= mtp_function_bind_config,
+	.ctrlrequest	= mtp_function_ctrlrequest,
+};
+
+/* PTP function is same as MTP with slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+	.name		= "ptp",
+	.init		= ptp_function_init,
+	.cleanup	= ptp_function_cleanup,
+	.bind_config	= ptp_function_bind_config,
+};
+
+
+struct rndis_function_config {
+	u8      ethaddr[ETH_ALEN];
+	u32     vendorID;
+	char	manufacturer[256];
+	bool	wceis;
+};
+
+static int rndis_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int rndis_function_bind_config(struct android_usb_function *f,
+					struct usb_configuration *c)
+{
+	int ret;
+	struct rndis_function_config *rndis = f->config;
+
+	if (!rndis) {
+		pr_err("%s: rndis_pdata\n", __func__);
+		return -1;
+	}
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+	ret = gether_setup(c->cdev->gadget, rndis->ethaddr);
+	if (ret) {
+		pr_err("%s: gether_setup failed\n", __func__);
+		return ret;
+	}
+
+	if (rndis->wceis) {
+		/* "Wireless" RNDIS; auto-detected by Windows */
+		rndis_iad_descriptor.bFunctionClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_control_intf.bInterfaceClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_control_intf.bInterfaceSubClass =	 0x01;
+		rndis_control_intf.bInterfaceProtocol =	 0x03;
+	}
+
+	return rndis_bind_config(c, rndis->ethaddr, rndis->vendorID,
+				    rndis->manufacturer);
+}
+
+static void rndis_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	gether_cleanup();
+}
+
+static ssize_t rndis_manufacturer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t rndis_manufacturer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+
+	if (size >= sizeof(config->manufacturer))
+		return -EINVAL;
+	if (sscanf(buf, "%s", config->manufacturer) == 1)
+		return size;
+	return -1;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+						    rndis_manufacturer_store);
+
+static ssize_t rndis_wceis_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%d\n", config->wceis);
+}
+
+static ssize_t rndis_wceis_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		config->wceis = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+					     rndis_wceis_store);
+
+static ssize_t rndis_ethaddr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *rndis = f->config;
+	return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+static ssize_t rndis_ethaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *rndis = f->config;
+	unsigned int addr[ETH_ALEN];
+	int i;
+
+	/* parse into ints first: writing "%x" through an (int *) cast of the
+	 * u8 ethaddr bytes would overwrite neighbouring fields */
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		   &addr[0], &addr[1], &addr[2],
+		   &addr[3], &addr[4], &addr[5]) == 6) {
+		for (i = 0; i < ETH_ALEN; i++)
+			rndis->ethaddr[i] = addr[i];
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+					       rndis_ethaddr_store);
+
+static ssize_t rndis_vendorID_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t rndis_vendorID_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%04x", &value) == 1) {
+		config->vendorID = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+						rndis_vendorID_store);
+
+static struct device_attribute *rndis_function_attributes[] = {
+	&dev_attr_manufacturer,
+	&dev_attr_wceis,
+	&dev_attr_ethaddr,
+	&dev_attr_vendorID,
+	NULL
+};
+
+static struct android_usb_function rndis_function = {
+	.name		= "rndis",
+	.init		= rndis_function_init,
+	.cleanup	= rndis_function_cleanup,
+	.bind_config	= rndis_function_bind_config,
+	.unbind_config	= rndis_function_unbind_config,
+	.attributes	= rndis_function_attributes,
+};
+
+
+struct mass_storage_function_config {
+	struct fsg_config fsg;
+	struct fsg_common *common;
+};
+
+static int mass_storage_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	struct mass_storage_function_config *config;
+	struct fsg_common *common;
+	int err;
+
+	config = kzalloc(sizeof(struct mass_storage_function_config),
+								GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+
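+	/* one removable LUN; the backing file is provided later through sysfs */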
+	config->fsg.nluns = 1;
+	config->fsg.luns[0].removable = 1;
+
+	common = fsg_common_init(NULL, cdev, &config->fsg);
+	if (IS_ERR(common)) {
+		kfree(config);
+		return PTR_ERR(common);
+	}
+
+	err = sysfs_create_link(&f->dev->kobj,
+				&common->luns[0].dev.kobj,
+				"lun");
+	if (err) {
+		kfree(config);
+		return err;
+	}
+
+	config->common = common;
+	f->config = config;
+	return 0;
+}
+
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct mass_storage_function_config *config = f->config;
+	return fsg_bind_config(c->cdev, c, config->common);
+}
+
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct mass_storage_function_config *config = f->config;
+	return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct mass_storage_function_config *config = f->config;
+	if (size >= sizeof(config->common->inquiry_string))
+		return -EINVAL;
+	if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+		return -EINVAL;
+	return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+					mass_storage_inquiry_show,
+					mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+	&dev_attr_inquiry_string,
+	NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+	.name		= "mass_storage",
+	.init		= mass_storage_function_init,
+	.cleanup	= mass_storage_function_cleanup,
+	.bind_config	= mass_storage_function_bind_config,
+	.attributes	= mass_storage_function_attributes,
+};
+
+
+static int accessory_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+	acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	return acc_bind_config(c);
+}
+
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+						struct usb_composite_dev *cdev,
+						const struct usb_ctrlrequest *c)
+{
+	return acc_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function accessory_function = {
+	.name		= "accessory",
+	.init		= accessory_function_init,
+	.cleanup	= accessory_function_cleanup,
+	.bind_config	= accessory_function_bind_config,
+	.ctrlrequest	= accessory_function_ctrlrequest,
+};
+
+
+static struct android_usb_function *supported_functions[] = {
+	&adb_function,
+	&acm_function,
+	&mtp_function,
+	&ptp_function,
+	&rndis_function,
+	&mass_storage_function,
+	&accessory_function,
+	NULL
+};
+
+
+static int android_init_functions(struct android_usb_function **functions,
+				  struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct android_usb_function *f;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	int err = 0;
+	int index = 0;
+
+	for (; (f = *functions++); index++) {
+		f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+		f->dev = device_create(android_class, dev->dev,
+				MKDEV(0, index), f, f->dev_name);
+		if (IS_ERR(f->dev)) {
+			pr_err("%s: Failed to create dev %s\n", __func__,
+							f->dev_name);
+			err = PTR_ERR(f->dev);
+			goto err_create;
+		}
+
+		if (f->init) {
+			err = f->init(f, cdev);
+			if (err) {
+				pr_err("%s: Failed to init %s\n", __func__,
+								f->name);
+				goto err_out;
+			}
+		}
+
+		attrs = f->attributes;
+		if (attrs) {
+			while ((attr = *attrs++) && !err)
+				err = device_create_file(f->dev, attr);
+		}
+		if (err) {
+			pr_err("%s: Failed to create function %s attributes\n",
+					__func__, f->name);
+			goto err_out;
+		}
+	}
+	return 0;
+
+err_out:
+	device_destroy(android_class, f->dev->devt);
+err_create:
+	kfree(f->dev_name);
+	return err;
+}
+
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+	struct android_usb_function *f;
+
+	while (*functions) {
+		f = *functions++;
+
+		if (f->dev) {
+			device_destroy(android_class, f->dev->devt);
+			kfree(f->dev_name);
+		}
+
+		if (f->cleanup)
+			f->cleanup(f);
+	}
+}
+
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+	int ret;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		ret = f->bind_config(f, c);
+		if (ret) {
+			pr_err("%s: %s failed\n", __func__, f->name);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->unbind_config)
+			f->unbind_config(f, c);
+	}
+}
+
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+	struct android_usb_function **functions = dev->functions;
+	struct android_usb_function *f;
+	while ((f = *functions++)) {
+		if (!strcmp(name, f->name)) {
+			list_add_tail(&f->enabled_list, &dev->enabled_functions);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
+
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_usb_function *f;
+	char *buff = buf;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+		buff += sprintf(buff, "%s,", f->name);
+	if (buff != buf)
+		*(buff-1) = '\n';
+	return buff - buf;
+}
+
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+			       const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	char *name;
+	char buf[256], *b;
+	int err;
+
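+	/* rebuild the enabled-function list from the comma-separated names written by userspace */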
+	INIT_LIST_HEAD(&dev->enabled_functions);
+
+	strncpy(buf, buff, sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	b = strim(buf);
+
+	while (b) {
+		name = strsep(&b, ",");
+		if (name) {
+			err = android_enable_function(dev, name);
+			if (err)
+				pr_err("android_usb: Cannot enable '%s'\n", name);
+		}
+	}
+
+	return size;
+}
+
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	return sprintf(buf, "%d\n", dev->enabled);
+}
+
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+			    const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int enabled = 0;
+
+	sscanf(buff, "%d", &enabled);
+	if (enabled && !dev->enabled) {
+		/* update values in composite driver's copy of device descriptor */
+		cdev->desc.idVendor = device_desc.idVendor;
+		cdev->desc.idProduct = device_desc.idProduct;
+		cdev->desc.bcdDevice = device_desc.bcdDevice;
+		cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+		cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+		cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+		usb_add_config(cdev, &android_config_driver,
+					android_bind_config);
+		usb_gadget_connect(cdev->gadget);
+		dev->enabled = true;
+	} else if (!enabled && dev->enabled) {
+		usb_gadget_disconnect(cdev->gadget);
+		usb_remove_config(cdev, &android_config_driver);
+		dev->enabled = false;
+	} else {
+		pr_err("android_usb: already %s\n",
+				dev->enabled ? "enabled" : "disabled");
+	}
+	return size;
+}
+
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev = dev->cdev;
+	char *state = "DISCONNECTED";
+	unsigned long flags;
+
+	if (!cdev)
+		goto out;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		state = "CONFIGURED";
+	else if (dev->connected)
+		state = "CONNECTED";
+	spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+	return sprintf(buf, "%s\n", state);
+}
+
+#define DESCRIPTOR_ATTR(field, format_string)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, format_string, device_desc.field);		\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)		       		\
+{									\
+	int value;					       		\
+	if (sscanf(buf, format_string, &value) == 1) {			\
+		device_desc.field = value;				\
+		return size;						\
+	}								\
+	return -1;							\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define DESCRIPTOR_STRING_ATTR(field, buffer)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, "%s", buffer);				\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)		       		\
+{									\
+	if (size >= sizeof(buffer)) return -EINVAL;			\
+	if (sscanf(buf, "%s", buffer) == 1) {			       	\
+		return size;						\
+	}								\
+	return -1;							\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show, functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+	&dev_attr_idVendor,
+	&dev_attr_idProduct,
+	&dev_attr_bcdDevice,
+	&dev_attr_bDeviceClass,
+	&dev_attr_bDeviceSubClass,
+	&dev_attr_bDeviceProtocol,
+	&dev_attr_iManufacturer,
+	&dev_attr_iProduct,
+	&dev_attr_iSerial,
+	&dev_attr_functions,
+	&dev_attr_enable,
+	&dev_attr_state,
+	NULL
+};
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+static int android_bind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+	int ret = 0;
+
+	ret = android_bind_enabled_functions(dev, c);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void android_unbind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+
+	android_unbind_enabled_functions(dev, c);
+}
+
+static int android_bind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			gcnum, id, ret;
+
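+	/* keep the pull-up disconnected until userspace enables the gadget via the "enable" attribute */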
+	usb_gadget_disconnect(gadget);
+
+	ret = android_init_functions(dev->functions, cdev);
+	if (ret)
+		return ret;
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_MANUFACTURER_IDX].id = id;
+	device_desc.iManufacturer = id;
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_PRODUCT_IDX].id = id;
+	device_desc.iProduct = id;
+
+	/* Default strings - should be updated by userspace */
+	strncpy(manufacturer_string, "Android", sizeof(manufacturer_string) - 1);
+	strncpy(product_string, "Android", sizeof(product_string) - 1);
+	strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_SERIAL_IDX].id = id;
+	device_desc.iSerialNumber = id;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+	else {
+		/* gadget zero is so simple (for now, no altsettings) that
+		 * it SHOULD NOT have problems with bulk-capable hardware.
+		 * so just warn about unrecognized controllers -- don't panic.
+		 *
+		 * things like configuration and altsetting numbering
+		 * can need hardware-specific attention though.
+		 */
+		pr_warning("%s: controller '%s' not recognized\n",
+			longname, gadget->name);
+		device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
+	}
+
+	usb_gadget_set_selfpowered(gadget);
+	dev->cdev = cdev;
+
+	return 0;
+}
+
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+
+	cancel_work_sync(&dev->work);
+	android_cleanup_functions(dev->functions);
+	return 0;
+}
+
+static struct usb_composite_driver android_usb_driver = {
+	.name		= "android_usb",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.unbind		= android_usb_unbind,
+};
+
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+	struct android_dev		*dev = _android_dev;
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_request		*req = cdev->req;
+	struct android_usb_function	**functions = dev->functions;
+	struct android_usb_function	*f;
+	int value = -EOPNOTSUPP;
+	unsigned long flags;
+
+	req->zero = 0;
+	req->complete = composite_setup_complete;
+	req->length = 0;
+	gadget->ep0->driver_data = cdev;
+
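+	/* give every function a chance to handle the request first; some,
+	 * such as accessory, must respond even before they are enabled */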
+	while ((f = *functions++)) {
+		if (f->ctrlrequest) {
+			value = f->ctrlrequest(f, cdev, c);
+			if (value >= 0)
+				break;
+		}
+	}
+
+	if (value < 0)
+		value = composite_setup(gadget, c);
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (!dev->connected) {
+		dev->connected = 1;
+		schedule_work(&dev->work);
+	} else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+		   cdev->config) {
+		schedule_work(&dev->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return value;
+}
+
+static void android_disconnect(struct usb_gadget *gadget)
+{
+	struct android_dev *dev = _android_dev;
+	dev->connected = 0;
+	schedule_work(&dev->work);
+	composite_disconnect(gadget);
+}
+
+static int android_create_device(struct android_dev *dev)
+{
+	struct device_attribute **attrs = android_usb_attributes;
+	struct device_attribute *attr;
+	int err;
+
+	dev->dev = device_create(android_class, NULL,
+					MKDEV(0, 0), NULL, "android0");
+	if (IS_ERR(dev->dev))
+		return PTR_ERR(dev->dev);
+
+	dev_set_drvdata(dev->dev, dev);
+
+	while ((attr = *attrs++)) {
+		err = device_create_file(dev->dev, attr);
+		if (err) {
+			device_destroy(android_class, dev->dev->devt);
+			return err;
+		}
+	}
+	return 0;
+}
+
+
+static int __init init(void)
+{
+	struct android_dev *dev;
+	int err;
+
+	android_class = class_create(THIS_MODULE, "android_usb");
+	if (IS_ERR(android_class))
+		return PTR_ERR(android_class);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->functions = supported_functions;
+	INIT_LIST_HEAD(&dev->enabled_functions);
+	INIT_WORK(&dev->work, android_work);
+
+	err = android_create_device(dev);
+	if (err) {
+		class_destroy(android_class);
+		kfree(dev);
+		return err;
+	}
+
+	_android_dev = dev;
+
+	/* Override composite driver functions */
+	composite_driver.setup = android_setup;
+	composite_driver.disconnect = android_disconnect;
+
+	return usb_composite_probe(&android_usb_driver, android_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&android_usb_driver);
+	class_destroy(android_class);
+	kfree(_android_dev);
+	_android_dev = NULL;
+}
+module_exit(cleanup);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5cbb1a4..dc06da6 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -476,6 +476,7 @@
 	power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW;
 done:
 	usb_gadget_vbus_draw(gadget, power);
+
 	if (result >= 0 && cdev->delayed_status)
 		result = USB_GADGET_DELAYED_STATUS;
 	return result;
@@ -523,6 +524,7 @@
 
 	INIT_LIST_HEAD(&config->functions);
 	config->next_interface_id = 0;
+	memset(config->interface, '\0', sizeof(config->interface));
 
 	status = bind(config);
 	if (status < 0) {
@@ -562,6 +564,45 @@
 	return status;
 }
 
+static int remove_config(struct usb_composite_dev *cdev,
+			      struct usb_configuration *config)
+{
+	while (!list_empty(&config->functions)) {
+		struct usb_function		*f;
+
+		f = list_first_entry(&config->functions,
+				struct usb_function, list);
+		list_del(&f->list);
+		if (f->unbind) {
+			DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
+			f->unbind(config, f);
+			/* may free memory for "f" */
+		}
+	}
+	list_del(&config->list);
+	if (config->unbind) {
+		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
+		config->unbind(config);
+			/* may free memory for "c" */
+	}
+	return 0;
+}
+
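+/*
+ * usb_remove_config() - unbind a configuration's functions and remove it
+ * from the composite device.  If the configuration is currently active it
+ * is reset (deactivated) first, so the unbind callbacks run with the
+ * hardware quiesced.
+ */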
+int usb_remove_config(struct usb_composite_dev *cdev,
+		      struct usb_configuration *config)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (cdev->config == config)
+		reset_config(cdev);
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return remove_config(cdev, config);
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* We support strings in multiple languages ... string descriptor zero
@@ -1041,28 +1082,9 @@
 
 	while (!list_empty(&cdev->configs)) {
 		struct usb_configuration	*c;
-
 		c = list_first_entry(&cdev->configs,
 				struct usb_configuration, list);
-		while (!list_empty(&c->functions)) {
-			struct usb_function		*f;
-
-			f = list_first_entry(&c->functions,
-					struct usb_function, list);
-			list_del(&f->list);
-			if (f->unbind) {
-				DBG(cdev, "unbind function '%s'/%p\n",
-						f->name, f);
-				f->unbind(c, f);
-				/* may free memory for "f" */
-			}
-		}
-		list_del(&c->list);
-		if (c->unbind) {
-			DBG(cdev, "unbind config '%s'/%p\n", c->label, c);
-			c->unbind(c);
-			/* may free memory for "c" */
-		}
+		remove_config(cdev, c);
 	}
 	if (composite->unbind)
 		composite->unbind(cdev);
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
new file mode 100644
index 0000000..873ab2e
--- /dev/null
+++ b/drivers/usb/gadget/f_accessory.c
@@ -0,0 +1,783 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    1
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	/* set to 1 when we connect */
+	unsigned int online:1;
+	/* Set to 1 when we disconnect.
+	 * Not cleared until our file is closed.
+	 */
+	unsigned int disconnected:1;
+
+	/* strings sent by the host */
+	char manufacturer[ACC_STRING_SIZE];
+	char model[ACC_STRING_SIZE];
+	char description[ACC_STRING_SIZE];
+	char version[ACC_STRING_SIZE];
+	char uri[ACC_STRING_SIZE];
+	char serial[ACC_STRING_SIZE];
+
+	/* for acc_complete_set_string */
+	int string_index;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+	struct delayed_work work;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_out_desc,
+	NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+	&acc_string_table,
+	NULL,
+};
+
+/* global device state, allocated in acc_setup() */
+static struct acc_dev *_acc_dev;
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+	return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+	dev->online = 0;
+	dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	if (req->status != 0)
+		acc_set_disconnected(dev);
+
+	req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		acc_set_disconnected(dev);
+
+	wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev	*dev = ep->driver_data;
+	char *string_dest = NULL;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_string, err %d\n", req->status);
+		return;
+	}
+
+	switch (dev->string_index) {
+	case ACCESSORY_STRING_MANUFACTURER:
+		string_dest = dev->manufacturer;
+		break;
+	case ACCESSORY_STRING_MODEL:
+		string_dest = dev->model;
+		break;
+	case ACCESSORY_STRING_DESCRIPTION:
+		string_dest = dev->description;
+		break;
+	case ACCESSORY_STRING_VERSION:
+		string_dest = dev->version;
+		break;
+	case ACCESSORY_STRING_URI:
+		string_dest = dev->uri;
+		break;
+	case ACCESSORY_STRING_SERIAL:
+		string_dest = dev->serial;
+		break;
+	}
+	if (string_dest) {
+		unsigned long flags;
+
+		if (length >= ACC_STRING_SIZE)
+			length = ACC_STRING_SIZE - 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		memcpy(string_dest, req->buf, length);
+		/* ensure zero termination */
+		string_dest[length] = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		pr_err("unknown accessory string index %d\n",
+			dev->string_index);
+	}
+}
+
+static int __init create_bulk_endpoints(struct acc_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "acc_bind() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+	return -ENOMEM;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%zu)\n", count);
+
+	if (dev->disconnected)
+		return -ENODEV;
+
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	pr_debug("acc_read returning %d\n", r);
+	return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	pr_debug("acc_write(%zu)\n", count);
+
+	if (!dev->online || dev->disconnected)
+		return -ENODEV;
+
+	while (count > 0) {
+		if (!dev->online) {
+			pr_debug("acc_write: not online\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE)
+			xfer = BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %d\n", r);
+	return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
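+	/*
+	 * The ACCESSORY_GET_STRING_* codes hand back the identifying
+	 * strings previously received from the USB host.
+	 */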
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	}
+	if (!src)
+		return -EINVAL;
+
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_open\n");
+	if (atomic_xchg(&_acc_dev->open_excl, 1))
+		return -EBUSY;
+
+	_acc_dev->disconnected = 0;
+	fp->private_data = _acc_dev;
+	return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_release\n");
+
+	WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+	_acc_dev->disconnected = 0;
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+	.owner = THIS_MODULE,
+	.read = acc_read,
+	.write = acc_write,
+	.unlocked_ioctl = acc_ioctl,
+	.open = acc_open,
+	.release = acc_release,
+};
+
+static struct miscdevice acc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_accessory",
+	.fops = &acc_fops,
+};
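+
+/*
+ * Illustrative sketch of the expected userspace usage (not part of the
+ * driver; buf/len are placeholders): open the misc device registered by
+ * acc_setup(), query the strings sent by the host, then exchange bulk data.
+ *
+ *	int fd = open("/dev/usb_accessory", O_RDWR);
+ *	char model[256];
+ *	ioctl(fd, ACCESSORY_GET_STRING_MODEL, model);
+ *	read(fd, buf, sizeof(buf));	// data sent by the USB host
+ *	write(fd, buf, len);		// data to send to the USB host
+ */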
+
+
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct acc_dev	*dev = _acc_dev;
+	int	value = -EOPNOTSUPP;
+	u8 b_requestType = ctrl->bRequestType;
+	u8 b_request = ctrl->bRequest;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+
+/*
+	printk(KERN_INFO "acc_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			b_requestType, b_request,
+			w_value, w_index, w_length);
+*/
+
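+	/*
+	 * Android accessory protocol: ACCESSORY_GET_PROTOCOL returns the
+	 * protocol version, ACCESSORY_SEND_STRING stores one of the host's
+	 * identifying strings (selected by wIndex), and ACCESSORY_START
+	 * triggers the switch to accessory mode via the delayed work.
+	 */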
+	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_START) {
+			schedule_delayed_work(
+				&dev->work, msecs_to_jiffies(10));
+			value = 0;
+		} else if (b_request == ACCESSORY_SEND_STRING) {
+			dev->string_index = w_index;
+			cdev->gadget->ep0->driver_data = dev;
+			cdev->req->complete = acc_complete_set_string;
+			value = w_length;
+		}
+	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_GET_PROTOCOL) {
+			*((__le16 *)cdev->req->buf) =
+				cpu_to_le16(PROTOCOL_VERSION);
+			value = sizeof(u16);
+
+			/* clear any strings left over from a previous session */
+			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+			memset(dev->model, 0, sizeof(dev->model));
+			memset(dev->description, 0, sizeof(dev->description));
+			memset(dev->version, 0, sizeof(dev->version));
+			memset(dev->uri, 0, sizeof(dev->uri));
+			memset(dev->serial, 0, sizeof(dev->serial));
+		}
+	}
+
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "%s setup response queue error\n",
+				__func__);
+	}
+
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	return value;
+}
+
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct acc_dev	*dev = func_to_dev(f);
+	int			id;
+	int			ret;
+
+	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	acc_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+			&acc_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acc_highspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_highspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+}
+
+static void acc_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=START", NULL };
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+	ret = usb_ep_enable(dev->ep_in,
+			ep_choose(cdev->gadget,
+				&acc_highspeed_in_desc,
+				&acc_fullspeed_in_desc));
+	if (ret)
+		return ret;
+	ret = usb_ep_enable(dev->ep_out,
+			ep_choose(cdev->gadget,
+				&acc_highspeed_out_desc,
+				&acc_fullspeed_out_desc));
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	dev->online = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "acc_function_disable\n");
+	acc_set_disconnected(dev);
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_bind_config(struct usb_configuration *c)
+{
+	struct acc_dev *dev = _acc_dev;
+	int ret;
+
+	printk(KERN_INFO "acc_bind_config\n");
+
+	/* allocate a string ID for our interface */
+	if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		acc_interface_desc.iInterface = ret;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "accessory";
+	dev->function.strings = acc_strings,
+	dev->function.descriptors = fs_acc_descs;
+	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.bind = acc_function_bind;
+	dev->function.unbind = acc_function_unbind;
+	dev->function.set_alt = acc_function_set_alt;
+	dev->function.disable = acc_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int acc_setup(void)
+{
+	struct acc_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	atomic_set(&dev->open_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_DELAYED_WORK(&dev->work, acc_work);
+
+	/* _acc_dev must be set before calling usb_gadget_register_driver */
+	_acc_dev = dev;
+
+	ret = misc_register(&acc_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	printk(KERN_ERR "USB accessory gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void acc_cleanup(void)
+{
+	misc_deregister(&acc_device);
+	kfree(_acc_dev);
+	_acc_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index bd6226c..68b1a8e 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -405,10 +405,10 @@
 			usb_ep_disable(acm->notify);
 		} else {
 			VDBG(cdev, "init acm ctrl interface %d\n", intf);
-			acm->notify_desc = ep_choose(cdev->gadget,
-					acm->hs.notify,
-					acm->fs.notify);
 		}
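+		/* pick the descriptor for the current speed before
+		 * (re)enabling the notification endpoint
+		 */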
+		acm->notify_desc = ep_choose(cdev->gadget,
+				acm->hs.notify,
+				acm->fs.notify);
 		usb_ep_enable(acm->notify, acm->notify_desc);
 		acm->notify->driver_data = acm;
 
@@ -418,11 +418,11 @@
 			gserial_disconnect(&acm->port);
 		} else {
 			DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
-			acm->port.in_desc = ep_choose(cdev->gadget,
-					acm->hs.in, acm->fs.in);
-			acm->port.out_desc = ep_choose(cdev->gadget,
-					acm->hs.out, acm->fs.out);
 		}
+		acm->port.in_desc = ep_choose(cdev->gadget,
+				acm->hs.in, acm->fs.in);
+		acm->port.out_desc = ep_choose(cdev->gadget,
+				acm->hs.out, acm->fs.out);
 		gserial_connect(&acm->port, acm->port_num);
 
 	} else
@@ -697,6 +697,7 @@
 		usb_free_descriptors(f->hs_descriptors);
 	usb_free_descriptors(f->descriptors);
 	gs_free_req(acm->notify, acm->notify_req);
+	kfree(acm->port.func.name);
 	kfree(acm);
 }
 
@@ -768,7 +769,11 @@
 	acm->port.disconnect = acm_disconnect;
 	acm->port.send_break = acm_send_break;
 
-	acm->port.func.name = "acm";
+	acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num);
+	if (!acm->port.func.name) {
+		kfree(acm);
+		return -ENOMEM;
+	}
 	acm->port.func.strings = acm_strings;
 	/* descriptors are per-instance copies */
 	acm->port.func.bind = acm_bind;
diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
new file mode 100644
index 0000000..fe4455e
--- /dev/null
+++ b/drivers/usb/gadget/f_adb.c
@@ -0,0 +1,607 @@
+/*
+ * Gadget Driver for Android ADB
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#define ADB_BULK_BUFFER_SIZE           4096
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX 4
+
+static const char adb_shortname[] = "android_adb";
+
+struct adb_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	int online;
+	int error;
+
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req;
+	int rx_done;
+};
+
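+/* vendor-specific class/subclass/protocol triplet (0xff/0x42/0x01) used by
+ * the host-side adb tool to identify the ADB interface
+ */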
+static struct usb_interface_descriptor adb_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = 0xFF,
+	.bInterfaceSubClass     = 0x42,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor adb_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_adb_descs[] = {
+	(struct usb_descriptor_header *) &adb_interface_desc,
+	(struct usb_descriptor_header *) &adb_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &adb_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_adb_descs[] = {
+	(struct usb_descriptor_header *) &adb_interface_desc,
+	(struct usb_descriptor_header *) &adb_highspeed_in_desc,
+	(struct usb_descriptor_header *) &adb_highspeed_out_desc,
+	NULL,
+};
+
+
+/* global device state, allocated in adb_setup() */
+static struct adb_dev *_adb_dev;
+
+static inline struct adb_dev *func_to_adb(struct usb_function *f)
+{
+	return container_of(f, struct adb_dev, function);
+}
+
+
+static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void adb_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
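+/* simple exclusion helper: the first caller to take the counter from 0 to 1
+ * gets the lock, everyone else backs off
+ */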
+static inline int adb_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void adb_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+void adb_req_put(struct adb_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+struct usb_request *adb_req_get(struct adb_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void adb_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct adb_dev *dev = _adb_dev;
+
+	if (req->status != 0)
+		dev->error = 1;
+
+	adb_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void adb_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct adb_dev *dev = _adb_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		dev->error = 1;
+
+	wake_up(&dev->read_wq);
+}
+
+static int adb_create_bulk_endpoints(struct adb_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	req = adb_request_new(dev->ep_out, ADB_BULK_BUFFER_SIZE);
+	if (!req)
+		goto fail;
+	req->complete = adb_complete_out;
+	dev->rx_req = req;
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = adb_request_new(dev->ep_in, ADB_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = adb_complete_in;
+		adb_req_put(dev, &dev->tx_idle, req);
+	}
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "adb_bind() could not allocate requests\n");
+	return -ENOMEM;
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	struct adb_dev *dev = fp->private_data;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	pr_debug("adb_read(%zu)\n", count);
+	if (!_adb_dev)
+		return -ENODEV;
+
+	if (count > ADB_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
+	if (adb_lock(&dev->read_excl))
+		return -EBUSY;
+
+	/* we will block until we're online */
+	while (!(dev->online || dev->error)) {
+		pr_debug("adb_read: waiting for online state\n");
+		ret = wait_event_interruptible(dev->read_wq,
+				(dev->online || dev->error));
+		if (ret < 0) {
+			adb_unlock(&dev->read_excl);
+			return ret;
+		}
+	}
+	if (dev->error) {
+		r = -EIO;
+		goto done;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req;
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+	if (ret < 0) {
+		pr_debug("adb_read: failed to queue req %p (%d)\n", req, ret);
+		r = -EIO;
+		dev->error = 1;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		dev->error = 1;
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (!dev->error) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+
+	} else
+		r = -EIO;
+
+done:
+	adb_unlock(&dev->read_excl);
+	pr_debug("adb_read returning %d\n", r);
+	return r;
+}
+
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+				 size_t count, loff_t *pos)
+{
+	struct adb_dev *dev = fp->private_data;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	if (!_adb_dev)
+		return -ENODEV;
+	pr_debug("adb_write(%zu)\n", count);
+
+	if (adb_lock(&dev->write_excl))
+		return -EBUSY;
+
+	while (count > 0) {
+		if (dev->error) {
+			pr_debug("adb_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = adb_req_get(dev, &dev->tx_idle)) || dev->error);
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			if (count > ADB_BULK_BUFFER_SIZE)
+				xfer = ADB_BULK_BUFFER_SIZE;
+			else
+				xfer = count;
+			if (copy_from_user(req->buf, buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+
+			req->length = xfer;
+			ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+			if (ret < 0) {
+				pr_debug("adb_write: xfer error %d\n", ret);
+				dev->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+		}
+	}
+
+	if (req)
+		adb_req_put(dev, &dev->tx_idle, req);
+
+	adb_unlock(&dev->write_excl);
+	pr_debug("adb_write returning %d\n", r);
+	return r;
+}
+
+static int adb_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "adb_open\n");
+	if (!_adb_dev)
+		return -ENODEV;
+
+	if (adb_lock(&_adb_dev->open_excl))
+		return -EBUSY;
+
+	fp->private_data = _adb_dev;
+
+	/* clear the error latch */
+	_adb_dev->error = 0;
+
+	return 0;
+}
+
+static int adb_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "adb_release\n");
+	adb_unlock(&_adb_dev->open_excl);
+	return 0;
+}
+
+/* file operations for ADB device /dev/android_adb */
+static const struct file_operations adb_fops = {
+	.owner = THIS_MODULE,
+	.read = adb_read,
+	.write = adb_write,
+	.open = adb_open,
+	.release = adb_release,
+};
+
+static struct miscdevice adb_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = adb_shortname,
+	.fops = &adb_fops,
+};
+
+static int
+adb_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct adb_dev	*dev = func_to_adb(f);
+	int			id;
+	int			ret;
+
+	dev->cdev = cdev;
+	DBG(cdev, "adb_function_bind dev: %p\n", dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	adb_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = adb_create_bulk_endpoints(dev, &adb_fullspeed_in_desc,
+			&adb_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		adb_highspeed_in_desc.bEndpointAddress =
+			adb_fullspeed_in_desc.bEndpointAddress;
+		adb_highspeed_out_desc.bEndpointAddress =
+			adb_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+adb_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_request *req;
+
+	dev->online = 0;
+	dev->error = 1;
+
+	wake_up(&dev->read_wq);
+
+	adb_request_free(dev->rx_req, dev->ep_out);
+	while ((req = adb_req_get(dev, &dev->tx_idle)))
+		adb_request_free(req, dev->ep_in);
+}
+
+static int adb_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt);
+	ret = usb_ep_enable(dev->ep_in,
+			ep_choose(cdev->gadget,
+				&adb_highspeed_in_desc,
+				&adb_fullspeed_in_desc));
+	if (ret)
+		return ret;
+	ret = usb_ep_enable(dev->ep_out,
+			ep_choose(cdev->gadget,
+				&adb_highspeed_out_desc,
+				&adb_fullspeed_out_desc));
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+	dev->online = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void adb_function_disable(struct usb_function *f)
+{
+	struct adb_dev	*dev = func_to_adb(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "adb_function_disable cdev %p\n", cdev);
+	dev->online = 0;
+	dev->error = 1;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int adb_bind_config(struct usb_configuration *c)
+{
+	struct adb_dev *dev = _adb_dev;
+
+	printk(KERN_INFO "adb_bind_config\n");
+
+	dev->cdev = c->cdev;
+	dev->function.name = "adb";
+	dev->function.descriptors = fs_adb_descs;
+	dev->function.hs_descriptors = hs_adb_descs;
+	dev->function.bind = adb_function_bind;
+	dev->function.unbind = adb_function_unbind;
+	dev->function.set_alt = adb_function_set_alt;
+	dev->function.disable = adb_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int adb_setup(void)
+{
+	struct adb_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->read_excl, 0);
+	atomic_set(&dev->write_excl, 0);
+
+	INIT_LIST_HEAD(&dev->tx_idle);
+
+	_adb_dev = dev;
+
+	ret = misc_register(&adb_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	printk(KERN_ERR "adb gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void adb_cleanup(void)
+{
+	misc_deregister(&adb_device);
+
+	kfree(_adb_dev);
+	_adb_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index efb58f9..5440c6d 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -3052,7 +3052,7 @@
 	if (unlikely(!fsg))
 		return -ENOMEM;
 
-	fsg->function.name        = FSG_DRIVER_DESC;
+	fsg->function.name        = "mass_storage";
 	fsg->function.strings     = fsg_strings_array;
 	fsg->function.bind        = fsg_bind;
 	fsg->function.unbind      = fsg_unbind;
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
new file mode 100644
index 0000000..a5b6463
--- /dev/null
+++ b/drivers/usb/gadget/f_mtp.c
@@ -0,0 +1,1273 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE       16384
+#define INTR_BUFFER_SIZE           28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE               0   /* initial state, disconnected */
+#define STATE_READY                 1   /* ready for userspace calls */
+#define STATE_BUSY                  2   /* processing userspace calls */
+#define STATE_CANCELED              3   /* transaction canceled by host */
+#define STATE_ERROR                 4   /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID   0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL              0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
+#define MTP_REQ_RESET               0x66
+#define MTP_REQ_GET_DEVICE_STATUS   0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK             0x2001
+#define MTP_RESPONSE_DEVICE_BUSY    0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+struct mtp_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+	struct usb_ep *ep_intr;
+
+	int state;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+	/* to enforce only one ioctl at a time */
+	atomic_t ioctl_excl;
+
+	struct list_head tx_idle;
+	struct list_head intr_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	wait_queue_head_t intr_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* for processing MTP_SEND_FILE and MTP_RECEIVE_FILE
+	 * ioctls on a work queue
+	 */
+	struct workqueue_struct *wq;
+	struct work_struct send_file_work;
+	struct work_struct receive_file_work;
+	struct file *xfer_file;
+	loff_t xfer_file_offset;
+	int64_t xfer_file_length;
+	int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+	.bInterfaceSubClass     = 1,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+	.bInterval              = 6,
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+	/* Naming interface "MTP" so libmtp will recognize us */
+	[INTERFACE_STRING_INDEX].s	= "MTP",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+	&mtp_string_table,
+	NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+	18, /* sizeof(mtp_os_string) */
+	USB_DT_STRING,
+	/* Signature field: "MSFT100" */
+	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+	/* vendor code */
+	1,
+	/* padding */
+	0
+};
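+/* Windows requests string descriptor 0xEE; the "MSFT100" signature and the
+ * vendor code above tell it to fetch the extended configuration descriptor
+ * below, which marks this interface as MTP-compatible.
+ */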
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+	__le32	dwLength;
+	__u16	bcdVersion;
+	__le16	wIndex;
+	__u8	bCount;
+	__u8	reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+	__u8	bFirstInterfaceNumber;
+	__u8	bInterfaceCount;
+	__u8	compatibleID[8];
+	__u8	subCompatibleID[8];
+	__u8	reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+	struct mtp_ext_config_desc_header	header;
+	struct mtp_ext_config_desc_function    function;
+} mtp_ext_config_desc = {
+	.header = {
+		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+		.bcdVersion = __constant_cpu_to_le16(0x0100),
+		.wIndex = __constant_cpu_to_le16(4),
+		.bCount = 1,
+	},
+	.function = {
+		.bFirstInterfaceNumber = 0,
+		.bInterfaceCount = 1,
+		.compatibleID = { 'M', 'T', 'P' },
+	},
+};
+
+struct mtp_device_status {
+	__le16	wLength;
+	__le16	wCode;
+};
+
+/* global MTP device state shared between the function driver and the fops */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+	return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static inline int mtp_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->intr_idle, req);
+
+	wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc,
+				struct usb_endpoint_descriptor *intr_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_intr = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_in;
+		mtp_req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_out;
+		dev->rx_req[i] = req;
+	}
+	for (i = 0; i < INTR_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_intr;
+		mtp_req_put(dev, &dev->intr_idle, req);
+	}
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
+	return -ENOMEM;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret = 0;
+
+	DBG(cdev, "mtp_read(%zu)\n", count);
+
+	if (count > MTP_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
+	/* we will block until we're online */
+	DBG(cdev, "mtp_read: waiting for online state\n");
+	ret = wait_event_interruptible(dev->read_wq,
+		dev->state != STATE_OFFLINE);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		DBG(cdev, "rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->state == STATE_BUSY) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		DBG(cdev, "rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_read returning %d\n", r);
+	return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int sendZLP = 0;
+	int ret;
+
+	DBG(cdev, "mtp_write(%zu)\n", count);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0) {
+		sendZLP = 1;
+	}
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		if (dev->state != STATE_BUSY) {
+			DBG(cdev, "mtp_write: state changed, aborting\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = mtp_req_get(dev, &dev->tx_idle))
+				|| dev->state != STATE_BUSY));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (xfer && copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "mtp_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_write returning %d\n", r);
+	return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						send_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int xfer, ret;
+	int r = 0;
+	int sendZLP = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((dev->xfer_file_length & (dev->ep_in->maxpacket - 1)) == 0) {
+		sendZLP = 1;
+	}
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = mtp_req_get(dev, &dev->tx_idle))
+			|| dev->state != STATE_BUSY);
+		if (dev->state == STATE_CANCELED) {
+			r = -ECANCELED;
+			break;
+		}
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		ret = vfs_read(filp, req->buf, xfer, &offset);
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+		xfer = ret;
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "send_file_work: xfer error %d\n", ret);
+			dev->state = STATE_ERROR;
+			r = -EIO;
+			break;
+		}
+
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	DBG(cdev, "send_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+	struct mtp_dev	*dev = container_of(data, struct mtp_dev, receive_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *read_req = NULL, *write_req = NULL;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int ret, cur_buf = 0;
+	int r = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "receive_file_work(%lld)\n", count);
+
+	while (count > 0 || write_req) {
+		if (count > 0) {
+			/* queue a request */
+			read_req = dev->rx_req[cur_buf];
+			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+			read_req->length = (count > MTP_BULK_BUFFER_SIZE
+					? MTP_BULK_BUFFER_SIZE : count);
+			dev->rx_done = 0;
+			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+			if (ret < 0) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+		}
+
+		if (write_req) {
+			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+			ret = vfs_write(filp, write_req->buf, write_req->actual,
+				&offset);
+			DBG(cdev, "vfs_write %d\n", ret);
+			if (ret != write_req->actual) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+			write_req = NULL;
+		}
+
+		if (read_req) {
+			/* wait for our last read to complete */
+			ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
+			if (dev->state == STATE_CANCELED) {
+				r = -ECANCELED;
+				if (!dev->rx_done)
+					usb_ep_dequeue(dev->ep_out, read_req);
+				break;
+			}
+			/* if xfer_file_length is 0xFFFFFFFF, then we read until
+			 * we get a zero length packet
+			 */
+			if (count != 0xFFFFFFFF)
+				count -= read_req->actual;
+			if (read_req->actual < read_req->length) {
+				/* short packet is used to signal EOF for sizes > 4 gig */
+				DBG(cdev, "got short packet\n");
+				count = 0;
+			}
+
+			write_req = read_req;
+			read_req = NULL;
+		}
+	}
+
+	DBG(cdev, "receive_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+	struct usb_request *req = NULL;
+	int ret;
+	int length = event->length;
+
+	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);
+
+	if (length < 0 || length > INTR_BUFFER_SIZE)
+		return -EINVAL;
+	if (dev->state == STATE_OFFLINE)
+		return -ENODEV;
+
+	ret = wait_event_interruptible_timeout(dev->intr_wq,
+		(req = mtp_req_get(dev, &dev->intr_idle)), msecs_to_jiffies(1000));
+	if (!req)
+		return -ETIME;
+
+	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+		mtp_req_put(dev, &dev->intr_idle, req);
+		return -EFAULT;
+	}
+	req->length = length;
+	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+	if (ret)
+		mtp_req_put(dev, &dev->intr_idle, req);
+
+	return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct file *filp = NULL;
+	int ret = -EINVAL;
+
+	if (mtp_lock(&dev->ioctl_excl))
+		return -EBUSY;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	{
+		struct mtp_file_range	mfr;
+		struct work_struct *work;
+
+		spin_lock_irq(&dev->lock);
+		if (dev->state == STATE_CANCELED) {
+			/* report cancelation to userspace */
+			dev->state = STATE_READY;
+			spin_unlock_irq(&dev->lock);
+			ret = -ECANCELED;
+			goto out;
+		}
+		if (dev->state == STATE_OFFLINE) {
+			spin_unlock_irq(&dev->lock);
+			ret = -ENODEV;
+			goto out;
+		}
+		dev->state = STATE_BUSY;
+		spin_unlock_irq(&dev->lock);
+
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		/* hold a reference to the file while we are working with it */
+		filp = fget(mfr.fd);
+		if (!filp) {
+			ret = -EBADF;
+			goto fail;
+		}
+
+		/* write the parameters */
+		dev->xfer_file = filp;
+		dev->xfer_file_offset = mfr.offset;
+		dev->xfer_file_length = mfr.length;
+		smp_wmb();
+
+		if (code == MTP_SEND_FILE)
+			work = &dev->send_file_work;
+		else
+			work = &dev->receive_file_work;
+
+		/* We do the file transfer on a work queue so it will run
+		 * in kernel context, which is necessary for vfs_read and
+		 * vfs_write to use our buffers in the kernel address space.
+		 */
+		queue_work(dev->wq, work);
+		/* wait for operation to complete */
+		flush_workqueue(dev->wq);
+		fput(filp);
+
+		/* read the result */
+		smp_rmb();
+		ret = dev->xfer_result;
+		break;
+	}
+	case MTP_SEND_EVENT:
+	{
+		struct mtp_event	event;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		goto out;
+	}
+	}
+
+fail:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		ret = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+out:
+	mtp_unlock(&dev->ioctl_excl);
+	DBG(dev->cdev, "ioctl returning %d\n", ret);
+	return ret;
+}
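
The MTP_SEND_FILE / MTP_RECEIVE_FILE ioctls above block the caller while the
actual transfer runs on the f_mtp workqueue and report the outcome through
xfer_result. As a rough illustration only (not part of this patch), a
userspace MTP stack might drive the send path as sketched below; the header
path and the exact mtp_file_range layout are assumptions based on the fields
used in mtp_ioctl(), and every other name is made up.

	/* Hypothetical userspace sketch -- not part of this patch. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/usb/f_mtp.h>	/* assumed home of MTP_SEND_FILE et al. */

	static int send_object(int mtp_fd, int data_fd, long long length)
	{
		struct mtp_file_range mfr = {
			.fd	= data_fd,	/* local file streamed to the host */
			.offset	= 0,
			.length	= length,
		};

		/* blocks until send_file_work() completes and xfer_result is read */
		return ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
	}
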
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_open\n");
+	if (mtp_lock(&_mtp_dev->open_excl))
+		return -EBUSY;
+
+	/* clear any error condition */
+	if (_mtp_dev->state != STATE_OFFLINE)
+		_mtp_dev->state = STATE_READY;
+
+	fp->private_data = _mtp_dev;
+	return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_release\n");
+
+	mtp_unlock(&_mtp_dev->open_excl);
+	return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+	.owner = THIS_MODULE,
+	.read = mtp_read,
+	.write = mtp_write,
+	.unlocked_ioctl = mtp_ioctl,
+	.open = mtp_open,
+	.release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = mtp_shortname,
+	.fops = &mtp_fops,
+};
+
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+
+	VDBG(cdev, "mtp_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	/* Handle MTP OS string */
+	if (ctrl->bRequestType ==
+			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+			&& (w_value >> 8) == USB_DT_STRING
+			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
+		value = (w_length < sizeof(mtp_os_string)
+				? w_length : sizeof(mtp_os_string));
+		memcpy(cdev->req->buf, mtp_os_string, value);
+	}
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s setup response queue error\n", __func__);
+	}
+	return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct mtp_dev	*dev = func_to_mtp(f);
+	int			id;
+	int			ret;
+
+	dev->cdev = cdev;
+	DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	mtp_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+			&mtp_fullspeed_out_desc, &mtp_intr_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		mtp_highspeed_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_highspeed_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = mtp_req_get(dev, &dev->tx_idle)))
+		mtp_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		mtp_request_free(dev->rx_req[i], dev->ep_out);
+	while ((req = mtp_req_get(dev, &dev->intr_idle)))
+		mtp_request_free(req, dev->ep_intr);
+	dev->state = STATE_OFFLINE;
+}
+
+static int mtp_function_setup(struct usb_function *f,
+					const struct usb_ctrlrequest *ctrl)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long	flags;
+
+	VDBG(cdev, "mtp_function_setup "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+		/* Handle MTP OS descriptor */
+		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == 1
+				&& (ctrl->bRequestType & USB_DIR_IN)
+				&& (w_index == 4 || w_index == 5)) {
+			value = (w_length < sizeof(mtp_ext_config_desc) ?
+					w_length : sizeof(mtp_ext_config_desc));
+			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+		}
+	}
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+				&& w_value == 0) {
+			DBG(cdev, "MTP_REQ_CANCEL\n");
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->state == STATE_BUSY) {
+				dev->state = STATE_CANCELED;
+				wake_up(&dev->read_wq);
+				wake_up(&dev->write_wq);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* We need to queue a request to read the remaining
+			 * bytes, but we don't actually need to look at
+			 * the contents.
+			 */
+			value = w_length;
+		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+				&& w_index == 0 && w_value == 0) {
+			struct mtp_device_status *status = cdev->req->buf;
+			status->wLength =
+				__constant_cpu_to_le16(sizeof(*status));
+
+			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			/* device status is "busy" until we report
+			 * the cancelation to userspace
+			 */
+			if (dev->state == STATE_CANCELED)
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+			else
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_OK);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			value = sizeof(*status);
+		}
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s setup response queue error\n", __func__);
+	}
+
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	return value;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+	ret = usb_ep_enable(dev->ep_in,
+			ep_choose(cdev->gadget,
+				&mtp_highspeed_in_desc,
+				&mtp_fullspeed_in_desc));
+	if (ret)
+		return ret;
+	ret = usb_ep_enable(dev->ep_out,
+			ep_choose(cdev->gadget,
+				&mtp_highspeed_out_desc,
+				&mtp_fullspeed_out_desc));
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+	ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc);
+	if (ret) {
+		usb_ep_disable(dev->ep_out);
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+	dev->state = STATE_READY;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "mtp_function_disable\n");
+	dev->state = STATE_OFFLINE;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+	usb_ep_disable(dev->ep_intr);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int ret = 0;
+
+	printk(KERN_INFO "mtp_bind_config\n");
+
+	/* allocate a string ID for our interface */
+	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		mtp_interface_desc.iInterface = ret;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "mtp";
+	dev->function.strings = mtp_strings;
+	if (ptp_config) {
+		dev->function.descriptors = fs_ptp_descs;
+		dev->function.hs_descriptors = hs_ptp_descs;
+	} else {
+		dev->function.descriptors = fs_mtp_descs;
+		dev->function.hs_descriptors = hs_mtp_descs;
+	}
+	dev->function.bind = mtp_function_bind;
+	dev->function.unbind = mtp_function_unbind;
+	dev->function.setup = mtp_function_setup;
+	dev->function.set_alt = mtp_function_set_alt;
+	dev->function.disable = mtp_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int mtp_setup(void)
+{
+	struct mtp_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	init_waitqueue_head(&dev->intr_wq);
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->ioctl_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->intr_idle);
+
+	dev->wq = create_singlethread_workqueue("f_mtp");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	INIT_WORK(&dev->send_file_work, send_file_work);
+	INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+	_mtp_dev = dev;
+
+	ret = misc_register(&mtp_device);
+	if (ret)
+		goto err2;
+
+	return 0;
+
+err2:
+	destroy_workqueue(dev->wq);
+err1:
+	_mtp_dev = NULL;
+	kfree(dev);
+	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void mtp_cleanup(void)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (!dev)
+		return;
+
+	misc_deregister(&mtp_device);
+	destroy_workqueue(dev->wq);
+	_mtp_dev = NULL;
+	kfree(dev);
+}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index fa12ec8..d03b11b 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -26,7 +26,7 @@
 
 #include <linux/slab.h>
 #include <linux/kernel.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
 #include <linux/etherdevice.h>
 
 #include <asm/atomic.h>
@@ -86,8 +86,11 @@
 	struct gether			port;
 	u8				ctrl_id, data_id;
 	u8				ethaddr[ETH_ALEN];
+	u32				vendorID;
+	const char			*manufacturer;
 	int				config;
 
+
 	struct rndis_ep_descs		fs;
 	struct rndis_ep_descs		hs;
 
@@ -187,12 +190,11 @@
 rndis_iad_descriptor = {
 	.bLength =		sizeof rndis_iad_descriptor,
 	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
-
 	.bFirstInterface =	0, /* XXX, hardcoded */
 	.bInterfaceCount = 	2,	// control + data
 	.bFunctionClass =	USB_CLASS_COMM,
 	.bFunctionSubClass =	USB_CDC_SUBCLASS_ETHERNET,
-	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	.bFunctionProtocol =	USB_CDC_ACM_PROTO_VENDOR,
 	/* .iFunction = DYNAMIC */
 };
 
@@ -486,10 +488,10 @@
 			usb_ep_disable(rndis->notify);
 		} else {
 			VDBG(cdev, "init rndis ctrl %d\n", intf);
-			rndis->notify_desc = ep_choose(cdev->gadget,
-					rndis->hs.notify,
-					rndis->fs.notify);
 		}
+		rndis->notify_desc = ep_choose(cdev->gadget,
+				rndis->hs.notify,
+				rndis->fs.notify);
 		usb_ep_enable(rndis->notify, rndis->notify_desc);
 		rndis->notify->driver_data = rndis;
 
@@ -503,11 +505,11 @@
 
 		if (!rndis->port.in) {
 			DBG(cdev, "init rndis\n");
-			rndis->port.in = ep_choose(cdev->gadget,
-					rndis->hs.in, rndis->fs.in);
-			rndis->port.out = ep_choose(cdev->gadget,
-					rndis->hs.out, rndis->fs.out);
 		}
+		rndis->port.in = ep_choose(cdev->gadget,
+				rndis->hs.in, rndis->fs.in);
+		rndis->port.out = ep_choose(cdev->gadget,
+				rndis->hs.out, rndis->fs.out);
 
 		/* Avoid ZLPs; they can be troublesome. */
 		rndis->port.is_zlp_ok = false;
@@ -706,12 +708,9 @@
 	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
 	rndis_set_host_mac(rndis->config, rndis->ethaddr);
 
-#if 0
-// FIXME
-	if (rndis_set_param_vendor(rndis->config, vendorID,
-				manufacturer))
-		goto fail0;
-#endif
+	if (rndis_set_param_vendor(rndis->config, rndis->vendorID,
+				   rndis->manufacturer))
+		goto fail;
 
 	/* NOTE:  all that is done without knowing or caring about
 	 * the network link ... which is unavailable to this code
@@ -756,6 +755,8 @@
 	rndis_deregister(rndis->config);
 	rndis_exit();
 
+	rndis_string_defs[0].id = 0;
+
 	if (gadget_is_dualspeed(c->cdev->gadget))
 		usb_free_descriptors(f->hs_descriptors);
 	usb_free_descriptors(f->descriptors);
@@ -786,7 +787,8 @@
  * for calling @gether_cleanup() before module unload.
  */
 int
-rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer)
 {
 	struct f_rndis	*rndis;
 	int		status;
@@ -831,6 +833,8 @@
 		goto fail;
 
 	memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
+	rndis->vendorID = vendorID;
+	rndis->manufacturer = manufacturer;
 
 	/* RNDIS activates when the host changes this filter */
 	rndis->port.cdc_filter = 0;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index d3cdffe..6cea2e1 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -159,6 +159,25 @@
 #endif	/* RNDIS_PM */
 };
 
+/* HACK: copied from net/core/dev.c to replace dev_get_stats since
+ * dev_get_stats cannot be called from atomic context */
+static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+				    const struct net_device_stats *netdev_stats)
+{
+#if BITS_PER_LONG == 64
+	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
+	memcpy(stats64, netdev_stats, sizeof(*stats64));
+#else
+	size_t i, n = sizeof(*stats64) / sizeof(u64);
+	const unsigned long *src = (const unsigned long *)netdev_stats;
+	u64 *dst = (u64 *)stats64;
+
+	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
+		     sizeof(*stats64) / sizeof(u64));
+	for (i = 0; i < n; i++)
+		dst[i] = src[i];
+#endif
+}
 
 /* NDIS Functions */
 static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
@@ -171,7 +190,7 @@
 	rndis_query_cmplt_type *resp;
 	struct net_device *net;
 	struct rtnl_link_stats64 temp;
-	const struct rtnl_link_stats64 *stats;
+	struct rtnl_link_stats64 *stats = &temp;
 
 	if (!r) return -ENOMEM;
 	resp = (rndis_query_cmplt_type *)r->buf;
@@ -194,7 +213,7 @@
 	resp->InformationBufferOffset = cpu_to_le32(16);
 
 	net = rndis_per_dev_params[configNr].dev;
-	stats = dev_get_stats(net, &temp);
+	netdev_stats_to_stats64(stats, &net->stats);
 
 	switch (OID) {
 
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 1fa4f70..a872248 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -763,10 +763,16 @@
 	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
 	int		rc = 0;
 
+
+#ifndef CONFIG_USB_ANDROID_MASS_STORAGE
+	/* Disabled in Android because we need to allow closing the backing
+	 * file if the media was removed.
+	 */
 	if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
 		LDBG(curlun, "eject attempt prevented\n");
 		return -EBUSY;				/* "Door is locked" */
 	}
+#endif
 
 	/* Remove a trailing newline */
 	if (count > 0 && buf[count-1] == '\n')
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2ac1d21..6590501 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -943,7 +943,6 @@
 	struct eth_dev		*dev = link->ioport;
 	struct usb_request	*req;
 
-	WARN_ON(!dev);
 	if (!dev)
 		return;
 
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index b56e1e7..5bb1021 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -112,12 +112,14 @@
 
 #ifdef USB_ETH_RNDIS
 
-int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer);
 
 #else
 
 static inline int
-rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer)
 {
 	return 0;
 }
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 40f7716..3fdcc9a 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -122,7 +122,7 @@
 };
 
 /* increase N_PORTS if you need more */
-#define N_PORTS		4
+#define N_PORTS		8
 static struct portmaster {
 	struct mutex	lock;			/* protect open/close */
 	struct gs_port	*port;
@@ -1028,7 +1028,7 @@
 
 static struct tty_driver *gs_tty_driver;
 
-static int __init
+static int
 gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
 {
 	struct gs_port	*port;
@@ -1074,7 +1074,7 @@
  *
  * Returns negative errno or zero.
  */
-int __init gserial_setup(struct usb_gadget *g, unsigned count)
+int gserial_setup(struct usb_gadget *g, unsigned count)
 {
 	unsigned			i;
 	struct usb_cdc_line_coding	coding;
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index c66481a..cd77719 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -12,6 +12,14 @@
 	  Select this to make sure the build includes objects from
 	  the OTG infrastructure directory.
 
+config USB_OTG_WAKELOCK
+	bool "Hold a wakelock when USB connected"
+	depends on WAKELOCK
+	select USB_OTG_UTILS
+	help
+	  Select this to automatically hold a wakelock when USB is
+	  connected, preventing suspend.
+
 if USB || USB_GADGET
 
 #
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 566655c..d2c0a7b 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -7,6 +7,8 @@
 
 # infrastructure
 obj-$(CONFIG_USB_OTG_UTILS)	+= otg.o
+obj-$(CONFIG_USB_OTG_WAKELOCK)	+= otg-wakelock.o
+obj-$(CONFIG_USB_OTG_UTILS)	+= otg_id.o
 
 # transceiver drivers
 obj-$(CONFIG_USB_GPIO_VBUS)	+= gpio_vbus.o
diff --git a/drivers/usb/otg/otg-wakelock.c b/drivers/usb/otg/otg-wakelock.c
new file mode 100644
index 0000000..9931626
--- /dev/null
+++ b/drivers/usb/otg/otg-wakelock.c
@@ -0,0 +1,190 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+static bool enabled = true;
+static struct otg_transceiver *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * locked field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock, but since these 3 fields are associated with each other...
+ */
+
+struct otgwl_lock {
+	char name[40];
+	struct wake_lock wakelock;
+	bool locked;
+};
+
+/*
+ * VBUS present lock.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_grab(struct otgwl_lock *lock)
+{
+	if (!lock->locked) {
+		wake_lock(&lock->wakelock);
+		lock->locked = true;
+	}
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+	if (lock->locked) {
+		wake_unlock(&lock->wakelock);
+		lock->locked = false;
+	}
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+				   unsigned long event, void *unused)
+{
+	unsigned long irqflags;
+
+	if (!enabled)
+		return NOTIFY_OK;
+
+	spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+	switch (event) {
+	case USB_EVENT_VBUS:
+	case USB_EVENT_ENUMERATED:
+		otgwl_grab(&vbus_lock);
+		break;
+
+	case USB_EVENT_NONE:
+	case USB_EVENT_ID:
+	case USB_EVENT_CHARGER:
+		otgwl_drop(&vbus_lock);
+		break;
+
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+	return NOTIFY_OK;
+}
+
+static void sync_with_xceiv_state(void)
+{
+	if ((otgwl_xceiv->last_event == USB_EVENT_VBUS) ||
+	    (otgwl_xceiv->last_event == USB_EVENT_ENUMERATED))
+		otgwl_grab(&vbus_lock);
+	else
+		otgwl_drop(&vbus_lock);
+}
+
+static int init_for_xceiv(void)
+{
+	int rv;
+
+	if (!otgwl_xceiv) {
+		otgwl_xceiv = otg_get_transceiver();
+
+		if (!otgwl_xceiv) {
+			pr_err("%s: No OTG transceiver found\n", __func__);
+			return -ENODEV;
+		}
+
+		snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+			 dev_name(otgwl_xceiv->dev));
+		wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+			       vbus_lock.name);
+
+		rv = otg_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+		if (rv) {
+			pr_err("%s: otg_register_notifier on transceiver %s"
+			       " failed\n", __func__,
+			       dev_name(otgwl_xceiv->dev));
+			otgwl_xceiv = NULL;
+			wake_lock_destroy(&vbus_lock.wakelock);
+			return rv;
+		}
+	}
+
+	return 0;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+	unsigned long irqflags;
+	int rv = param_set_bool(val, kp);
+
+	if (rv)
+		return rv;
+
+	rv = init_for_xceiv();
+
+	if (rv)
+		return rv;
+
+	spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+	if (enabled)
+		sync_with_xceiv_state();
+	else
+		otgwl_drop(&vbus_lock);
+
+	spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+	return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+	.set = set_enabled,
+	.get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+	unsigned long irqflags;
+
+	otgwl_nb.notifier_call = otgwl_otg_notifications;
+
+	if (!init_for_xceiv()) {
+		spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+		if (enabled)
+			sync_with_xceiv_state();
+
+		spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+	} else {
+		enabled = false;
+	}
+
+	return 0;
+}
+
+late_initcall(otg_wakelock_init);
diff --git a/drivers/usb/otg/otg_id.c b/drivers/usb/otg/otg_id.c
new file mode 100644
index 0000000..2398b1a
--- /dev/null
+++ b/drivers/usb/otg/otg_id.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/usb/otg_id.h>
+
+static DEFINE_MUTEX(otg_id_lock);
+static struct plist_head otg_id_plist =
+	PLIST_HEAD_INIT(otg_id_plist);
+static struct otg_id_notifier_block *otg_id_active;
+static bool otg_id_cancelling;
+static bool otg_id_inited;
+
+static void otg_id_cancel(void)
+{
+	if (otg_id_active) {
+		otg_id_cancelling = true;
+		mutex_unlock(&otg_id_lock);
+
+		otg_id_active->cancel(otg_id_active);
+
+		mutex_lock(&otg_id_lock);
+		otg_id_cancelling = false;
+	}
+}
+
+static void __otg_id_notify(void)
+{
+	int ret;
+	struct otg_id_notifier_block *otg_id_nb;
+
+	if (plist_head_empty(&otg_id_plist))
+		return;
+
+	plist_for_each_entry(otg_id_nb, &otg_id_plist, p) {
+		ret = otg_id_nb->detect(otg_id_nb);
+		if (ret == OTG_ID_HANDLED) {
+			otg_id_active = otg_id_nb;
+			return;
+		}
+	}
+
+	WARN(1, "otg id event not handled");
+	otg_id_active = NULL;
+}
+
+int otg_id_init(void)
+{
+	mutex_lock(&otg_id_lock);
+
+	otg_id_inited = true;
+	__otg_id_notify();
+
+	mutex_unlock(&otg_id_lock);
+	return 0;
+}
+late_initcall(otg_id_init);
+
+/**
+ * otg_id_register_notifier
+ * @otg_id_nb: notifier block containing priority and callback function
+ *
+ * Register a notifier that will be called on any USB cable state change.
+ * The priority determines the order the callback will be called in, a higher
+ * number will be called first.  A callback function needs to determine the
+ * type of USB cable that is connected.  If it can determine the type, it
+ * should notify the appropriate drivers (for example, call an otg notifier
+ * with USB_EVENT_VBUS), and return OTG_ID_HANDLED.  Once a callback has
+ * returned OTG_ID_HANDLED, it is responsible for calling otg_id_notify() when
+ * the detected USB cable is disconnected.
+ */
+int otg_id_register_notifier(struct otg_id_notifier_block *otg_id_nb)
+{
+	plist_node_init(&otg_id_nb->p, otg_id_nb->priority);
+
+	mutex_lock(&otg_id_lock);
+	plist_add(&otg_id_nb->p, &otg_id_plist);
+
+	if (otg_id_inited) {
+		otg_id_cancel();
+		__otg_id_notify();
+	}
+
+	mutex_unlock(&otg_id_lock);
+
+	return 0;
+}
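
To make the contract described in the comment above concrete, here is an
illustrative sketch only (not part of this patch) of a hypothetical ID-pin
detection driver registering with this framework. It uses only the .priority,
.detect and .cancel members referenced in this file, and every "example_"
name is made up.

	#include <linux/usb/otg_id.h>

	static int example_detect(struct otg_id_notifier_block *nb)
	{
		/* Sample the ID pin / charger detection hardware here.  If the
		 * cable type is recognised, notify the interested drivers (e.g.
		 * send USB_EVENT_VBUS through an otg notifier) and claim the event.
		 */
		return OTG_ID_HANDLED;
	}

	static void example_cancel(struct otg_id_notifier_block *nb)
	{
		/* Abort any detection still in progress so another handler can run. */
	}

	static struct otg_id_notifier_block example_otg_id_nb = {
		.priority	= 10,	/* higher-priority handlers are asked first */
		.detect		= example_detect,
		.cancel		= example_cancel,
	};

	/* typically called from the detection driver's probe() */
	static int example_register(void)
	{
		return otg_id_register_notifier(&example_otg_id_nb);
	}
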
+
+void otg_id_unregister_notifier(struct otg_id_notifier_block *otg_id_nb)
+{
+	mutex_lock(&otg_id_lock);
+
+	plist_del(&otg_id_nb->p, &otg_id_plist);
+
+	if (otg_id_inited && (otg_id_active == otg_id_nb)) {
+		otg_id_cancel();
+		__otg_id_notify();
+	}
+
+	mutex_unlock(&otg_id_lock);
+}
+
+/**
+ * otg_id_notify
+ *
+ * Notify listeners on any USB cable state change.
+ *
+ * A driver may only call otg_id_notify if it returned OTG_ID_HANDLED the last
+ * time its notifier was called, and its cancel function has not been called.
+ */
+void otg_id_notify(void)
+{
+	mutex_lock(&otg_id_lock);
+
+	if (otg_id_cancelling)
+		goto out;
+
+	__otg_id_notify();
+
+out:
+	mutex_unlock(&otg_id_lock);
+}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 549b960..4c85a4b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,6 +23,8 @@
 
 source "drivers/gpu/stub/Kconfig"
 
+source "drivers/gpu/ion/Kconfig"
+
 config VGASTATE
        tristate
        default n
diff --git a/fs/Kconfig b/fs/Kconfig
index 19891aa..88701cc 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -192,6 +192,10 @@
 source "fs/befs/Kconfig"
 source "fs/bfs/Kconfig"
 source "fs/efs/Kconfig"
+
+# Patched by YAFFS
+source "fs/yaffs2/Kconfig"
+
 source "fs/jffs2/Kconfig"
 # UBIFS File system configuration
 source "fs/ubifs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index fb68c2b..2999b4d 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -124,3 +124,6 @@
 obj-$(CONFIG_EXOFS_FS)          += exofs/
 obj-$(CONFIG_CEPH_FS)		+= ceph/
 obj-$(CONFIG_PSTORE)		+= pstore/
+
+# Patched by YAFFS
+obj-$(CONFIG_YAFFS_FS)		+= yaffs2/
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 4ad6473..dc56378 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -754,6 +754,13 @@
 	return ret;
 }
 
+static int fat_ioctl_volume_id(struct inode *dir)
+{
+	struct super_block *sb = dir->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	return sbi->vol_id;
+}
+
 static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
 			  unsigned long arg)
 {
@@ -770,6 +777,8 @@
 		short_only = 0;
 		both = 1;
 		break;
+	case VFAT_IOCTL_GET_VOLUME_ID:
+		return fat_ioctl_volume_id(inode);
 	default:
 		return fat_generic_ioctl(filp, cmd, arg);
 	}
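
For illustration only (not part of this patch), userspace would read the
volume ID roughly as below. Note that this implementation returns the ID as
the ioctl() return value rather than through the argument pointer; the
assumption that VFAT_IOCTL_GET_VOLUME_ID is exported via linux/msdos_fs.h is
based on where the other VFAT ioctls live, and the mount point is made up.

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/msdos_fs.h>

	int main(void)
	{
		/* open the mount point (a directory fd) of the FAT volume */
		int fd = open("/mnt/sdcard", O_RDONLY);
		int id;

		if (fd < 0)
			return 1;
		id = ioctl(fd, VFAT_IOCTL_GET_VOLUME_ID, NULL);
		if (id != -1)
			printf("FAT volume id: %08x\n", id);
		return 0;
	}
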
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 8276cc2..74fe5d3 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -78,6 +78,7 @@
 	const void *dir_ops;		     /* Opaque; default directory operations */
 	int dir_per_block;	     /* dir entries per block */
 	int dir_per_block_bits;	     /* log2(dir_per_block) */
+	unsigned long vol_id;        /* volume ID */
 
 	int fatent_shift;
 	struct fatent_operations *fatent_ops;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cb8d839..9836839 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1245,6 +1245,7 @@
 	struct inode *root_inode = NULL, *fat_inode = NULL;
 	struct buffer_head *bh;
 	struct fat_boot_sector *b;
+	struct fat_boot_bsx *bsx;
 	struct msdos_sb_info *sbi;
 	u16 logical_sector_size;
 	u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
@@ -1390,6 +1391,8 @@
 			goto out_fail;
 		}
 
+		bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
+
 		fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
 		if (!IS_FSINFO(fsinfo)) {
 			fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
@@ -1405,8 +1408,14 @@
 		}
 
 		brelse(fsinfo_bh);
+	} else {
+		bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
 	}
 
+	/* interpret volume ID as a little endian 32 bit integer */
+	sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
+		((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
+
 	sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
 	sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 0f015a0..3c72c99 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1038,7 +1038,7 @@
 	if ((inode->i_state & flags) == flags)
 		return;
 
-	if (unlikely(block_dump))
+	if (unlikely(block_dump > 1))
 		block_dump___mark_inode_dirty(inode);
 
 	spin_lock(&inode->i_lock);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index d545e97..a0a041d 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -366,10 +366,21 @@
 	kfree(p);
 }
 
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct hd_struct *part = dev_to_part(dev);
+
+	add_uevent_var(env, "PARTN=%u", part->partno);
+	if (part->info && part->info->volname[0])
+		add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+	return 0;
+}
+
 struct device_type part_type = {
 	.name		= "partition",
 	.groups		= part_attr_groups,
 	.release	= part_release,
+	.uevent		= part_uevent,
 };
 
 static void delete_partition_rcu_cb(struct rcu_head *head)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index fc5bc27..cc9c0c3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -133,6 +133,12 @@
 		NULL, &proc_single_file_operations,	\
 		{ .proc_show = show } )
 
+/* ANDROID is for special files in /proc. */
+#define ANDROID(NAME, MODE, OTYPE)			\
+	NOD(NAME, (S_IFREG|(MODE)),			\
+		&proc_##OTYPE##_inode_operations,	\
+		&proc_##OTYPE##_operations, {})
+
 /*
  * Count the number of hardlinks for the pid_entry table, excluding the .
  * and .. links.
@@ -263,7 +269,8 @@
 
 	mm = get_task_mm(task);
 	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, PTRACE_MODE_READ)) {
+			!ptrace_may_access(task, PTRACE_MODE_READ) &&
+			!capable(CAP_SYS_RESOURCE)) {
 		mmput(mm);
 		mm = ERR_PTR(-EACCES);
 	}
@@ -1141,6 +1148,39 @@
 	return err < 0 ? err : count;
 }
 
+static int oom_adjust_permission(struct inode *inode, int mask,
+				 unsigned int flags)
+{
+	uid_t uid;
+	struct task_struct *p;
+
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
+	p = get_proc_task(inode);
+	if (p) {
+		uid = task_uid(p);
+		put_task_struct(p);
+	}
+
+	/*
+	 * System Server (uid == 1000) is granted access to oom_adj of all
+	 * Android applications (uid > 10000) and services (uid >= 1000).
+	 */
+	if (p && (current_fsuid() == 1000) && (uid >= 1000)) {
+		if (inode->i_mode >> 6 & mask) {
+			return 0;
+		}
+	}
+
+	/* Fall back to default. */
+	return generic_permission(inode, mask, flags, NULL);
+}
+
+static const struct inode_operations proc_oom_adjust_inode_operations = {
+	.permission	= oom_adjust_permission,
+};
+
 static const struct file_operations proc_oom_adjust_operations = {
 	.read		= oom_adjust_read,
 	.write		= oom_adjust_write,
@@ -2829,7 +2869,7 @@
 	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
 	INF("oom_score",  S_IRUGO, proc_oom_score),
-	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+	ANDROID("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
 	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100644
index 0000000..6354140
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,161 @@
+#
+# YAFFS file system configurations
+#
+
+config YAFFS_FS
+	tristate "YAFFS2 file system support"
+	default n
+	depends on MTD_BLOCK
+	select YAFFS_YAFFS1
+	select YAFFS_YAFFS2
+	help
+	  YAFFS2, or Yet Another Flash Filing System, is a filing system
+	  optimised for NAND Flash chips.
+
+	  To compile the YAFFS2 file system support as a module, choose M
+	  here: the module will be called yaffs2.
+
+	  If unsure, say N.
+
+	  Further information on YAFFS2 is available at
+	  <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+	bool "512 byte / page devices"
+	depends on YAFFS_FS
+	default y
+	help
+	  Enable YAFFS1 support -- yaffs for 512 byte / page devices
+
+	  Not needed for 2K-page devices.
+
+	  If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+	bool "Use older-style on-NAND data format with pageStatus byte"
+	depends on YAFFS_YAFFS1
+	default n
+	help
+
+	  Older-style on-NAND data format has a "pageStatus" byte to record
+	  chunk/page state.  This byte is zero when the page is discarded.
+	  Choose this option if you have existing on-NAND data using this
+	  format that you need to continue to support.  New data written
+	  also uses the older-style format.  Note: Use of this option
+	  generally requires that MTD's oob layout be adjusted to use the
+	  older-style format.  See notes on tags formats and MTD versions
+	  in yaffs_mtdif1.c.
+
+	  If unsure, say N.
+
+config YAFFS_DOES_ECC
+	bool "Lets Yaffs do its own ECC"
+	depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+	default n
+	help
+	  This enables Yaffs to use its own ECC functions instead of using
+	  the ones from the generic MTD-NAND driver.
+
+	  If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+	bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+	depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+	default n
+	help
+	  This makes yaffs_ecc.c use the same ecc byte order as Steven
+	  Hill's nand_ecc.c. If not set, then you get the same ecc byte
+	  order as SmartMedia.
+
+	  If unsure, say N.
+
+config YAFFS_YAFFS2
+	bool "2048 byte (or larger) / page devices"
+	depends on YAFFS_FS
+	default y
+	help
+	  Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices
+
+	  If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+	bool "Autoselect yaffs2 format"
+	depends on YAFFS_YAFFS2
+	default y
+	help
+	  Without this, you need to explicitly use yaffs2 as the file
+	  system type. With this, you can say "yaffs" and yaffs or yaffs2
+	  will be used depending on the device page size (yaffs on
+	  512-byte page devices, yaffs2 on 2K page devices).
+
+	  If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+	bool "Disable YAFFS from doing ECC on tags by default"
+	depends on YAFFS_FS && YAFFS_YAFFS2
+	default n
+	help
+	  Enabling this stops Yaffs from doing its own ECC calculations on
+	  tags by default, leaving tags ECC to the MTD instead.
+	  This behavior can also be overridden with the tags_ecc_on and
+	  tags_ecc_off mount options.
+
+	  If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+	bool "Force chunk erase check"
+	depends on YAFFS_FS
+	default n
+	help
+	  Normally YAFFS only checks chunks before writing until an erased
+	  chunk is found. This helps to detect any partially written
+	  chunks that might have happened due to power loss.
+
+	  Enabling this forces on the test that chunks are erased in flash
+	  before writing to them. This takes more time but is potentially
+	  a bit more secure.
+
+	  Setting this to Y is suggested while developing and ironing out
+	  driver issues etc. Set it to N if you want faster writing.
+
+	  If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+	bool "Empty lost and found on boot"
+	depends on YAFFS_FS
+	default n
+	help
+	  If this is enabled then the contents of lost and found are
+	  automatically dumped at mount.
+
+	  If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+	bool "Disable yaffs2 block refreshing"
+	depends on YAFFS_FS
+	default n
+	help
+	  If this is set, then block refreshing is disabled.
+	  Block refreshing infrequently refreshes the oldest block in
+	  a yaffs2 file system. This mechanism helps to guard against
+	  data loss and is particularly useful for MLC flash.
+
+	  If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+	bool "Disable yaffs2 background processing"
+	depends on YAFFS_FS
+	default n
+	help
+	  If this is set, then background processing is disabled.
+	  Background processing makes many foreground activities faster.
+
+	  If unsure, say N.
+
+config YAFFS_XATTR
+	bool "Enable yaffs2 xattr support"
+	depends on YAFFS_FS
+	default y
+	help
+	  If this is set then yaffs2 will provide xattr support.
+	  If unsure, say Y.
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100644
index 0000000..e63a28a
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_verify.o
+
diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
new file mode 100644
index 0000000..f9cd5be
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,396 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+#ifdef CONFIG_YAFFS_KMALLOC_ALLOCATOR
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	dev = dev;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	dev = dev;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+	return (struct yaffs_tnode *)kmalloc(dev->tnode_size, GFP_NOFS);
+}
+
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+	dev = dev;
+	kfree(tn);
+}
+
+void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+	dev = dev;
+}
+
+void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+	dev = dev;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+	dev = dev;
+	return (struct yaffs_obj *)kmalloc(sizeof(struct yaffs_obj), GFP_NOFS);
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+	dev = dev;
+	kfree(obj);
+}
+
+#else
+
+struct yaffs_tnode_list {
+	struct yaffs_tnode_list *next;
+	struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+	struct yaffs_obj_list *next;
+	struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+	int n_tnodes_created;
+	struct yaffs_tnode *free_tnodes;
+	int n_free_tnodes;
+	struct yaffs_tnode_list *alloc_tnode_list;
+
+	int n_obj_created;
+	struct yaffs_obj *free_objs;
+	int n_free_objects;
+
+	struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+
+	struct yaffs_allocator *allocator =
+	    (struct yaffs_allocator *)dev->allocator;
+
+	struct yaffs_tnode_list *tmp;
+
+	if (!allocator) {
+		YBUG();
+		return;
+	}
+
+	while (allocator->alloc_tnode_list) {
+		tmp = allocator->alloc_tnode_list->next;
+
+		kfree(allocator->alloc_tnode_list->tnodes);
+		kfree(allocator->alloc_tnode_list);
+		allocator->alloc_tnode_list = tmp;
+
+	}
+
+	allocator->free_tnodes = NULL;
+	allocator->n_free_tnodes = 0;
+	allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (allocator) {
+		allocator->alloc_tnode_list = NULL;
+		allocator->free_tnodes = NULL;
+		allocator->n_free_tnodes = 0;
+		allocator->n_tnodes_created = 0;
+	} else {
+		YBUG();
+	}
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+	struct yaffs_allocator *allocator =
+	    (struct yaffs_allocator *)dev->allocator;
+	int i;
+	struct yaffs_tnode *new_tnodes;
+	u8 *mem;
+	struct yaffs_tnode *curr;
+	struct yaffs_tnode *next;
+	struct yaffs_tnode_list *tnl;
+
+	if (!allocator) {
+		YBUG();
+		return YAFFS_FAIL;
+	}
+
+	if (n_tnodes < 1)
+		return YAFFS_OK;
+
+	/* make these things */
+
+	new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+	mem = (u8 *) new_tnodes;
+
+	if (!new_tnodes) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"yaffs: Could not allocate Tnodes");
+		return YAFFS_FAIL;
+	}
+
+	/* New hookup for wide tnodes */
+	for (i = 0; i < n_tnodes - 1; i++) {
+		curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+		next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+		curr->internal[0] = next;
+	}
+
+	curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+	curr->internal[0] = allocator->free_tnodes;
+	allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+	allocator->n_free_tnodes += n_tnodes;
+	allocator->n_tnodes_created += n_tnodes;
+
+	/* Now add this bunch of tnodes to a list for freeing up.
+	 * NB If we can't add this to the management list it isn't fatal
+	 * but it just means we can't free this bunch of tnodes later.
+	 */
+
+	tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+	if (!tnl) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Could not add tnodes to management list");
+		return YAFFS_FAIL;
+	} else {
+		tnl->tnodes = new_tnodes;
+		tnl->next = allocator->alloc_tnode_list;
+		allocator->alloc_tnode_list = tnl;
+	}
+
+	yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
+
+	return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator =
+	    (struct yaffs_allocator *)dev->allocator;
+	struct yaffs_tnode *tn = NULL;
+
+	if (!allocator) {
+		YBUG();
+		return NULL;
+	}
+
+	/* If there are none left make more */
+	if (!allocator->free_tnodes)
+		yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+	if (allocator->free_tnodes) {
+		tn = allocator->free_tnodes;
+		allocator->free_tnodes = allocator->free_tnodes->internal[0];
+		allocator->n_free_tnodes--;
+	}
+
+	return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator) {
+		YBUG();
+		return;
+	}
+
+	if (tn) {
+		tn->internal[0] = allocator->free_tnodes;
+		allocator->free_tnodes = tn;
+		allocator->n_free_tnodes++;
+	}
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+}
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (allocator) {
+		allocator->allocated_obj_list = NULL;
+		allocator->free_objs = NULL;
+		allocator->n_free_objects = 0;
+	} else {
+		YBUG();
+	}
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+	struct yaffs_obj_list *tmp;
+
+	if (!allocator) {
+		YBUG();
+		return;
+	}
+
+	while (allocator->allocated_obj_list) {
+		tmp = allocator->allocated_obj_list->next;
+		kfree(allocator->allocated_obj_list->objects);
+		kfree(allocator->allocated_obj_list);
+
+		allocator->allocated_obj_list = tmp;
+	}
+
+	allocator->free_objs = NULL;
+	allocator->n_free_objects = 0;
+	allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	int i;
+	struct yaffs_obj *new_objs;
+	struct yaffs_obj_list *list;
+
+	if (!allocator) {
+		YBUG();
+		return YAFFS_FAIL;
+	}
+
+	if (n_obj < 1)
+		return YAFFS_OK;
+
+	/* make these things */
+	new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+	list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+	if (!new_objs || !list) {
+		if (new_objs) {
+			kfree(new_objs);
+			new_objs = NULL;
+		}
+		if (list) {
+			kfree(list);
+			list = NULL;
+		}
+		yaffs_trace(YAFFS_TRACE_ALLOCATE,
+			"Could not allocate more objects");
+		return YAFFS_FAIL;
+	}
+
+	/* Hook them into the free list */
+	for (i = 0; i < n_obj - 1; i++) {
+		new_objs[i].siblings.next =
+		    (struct list_head *)(&new_objs[i + 1]);
+	}
+
+	new_objs[n_obj - 1].siblings.next = (void *)allocator->free_objs;
+	allocator->free_objs = new_objs;
+	allocator->n_free_objects += n_obj;
+	allocator->n_obj_created += n_obj;
+
+	/* Now add this bunch of Objects to a list for freeing up. */
+
+	list->objects = new_objs;
+	list->next = allocator->allocated_obj_list;
+	allocator->allocated_obj_list = list;
+
+	return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator) {
+		YBUG();
+		return obj;
+	}
+
+	/* If there are none left make more */
+	if (!allocator->free_objs)
+		yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+	if (allocator->free_objs) {
+		obj = allocator->free_objs;
+		allocator->free_objs =
+		    (struct yaffs_obj *)(allocator->free_objs->siblings.next);
+		allocator->n_free_objects--;
+	}
+
+	return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator)
+		YBUG();
+	else {
+		/* Link into the free list. */
+		obj->siblings.next = (struct list_head *)(allocator->free_objs);
+		allocator->free_objs = obj;
+		allocator->n_free_objects++;
+	}
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	if (dev->allocator) {
+		yaffs_deinit_raw_tnodes(dev);
+		yaffs_deinit_raw_objs(dev);
+
+		kfree(dev->allocator);
+		dev->allocator = NULL;
+	} else {
+		YBUG();
+	}
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator;
+
+	if (!dev->allocator) {
+		allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+		if (allocator) {
+			dev->allocator = allocator;
+			yaffs_init_raw_tnodes(dev);
+			yaffs_init_raw_objs(dev);
+		}
+	} else {
+		YBUG();
+	}
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
new file mode 100644
index 0000000..4d5f2ae
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
new file mode 100644
index 0000000..9b47d37
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,124 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+	obj->yst_uid = oh->yst_uid;
+	obj->yst_gid = oh->yst_gid;
+	obj->yst_atime = oh->yst_atime;
+	obj->yst_mtime = oh->yst_mtime;
+	obj->yst_ctime = oh->yst_ctime;
+	obj->yst_rdev = oh->yst_rdev;
+}
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+	oh->yst_uid = obj->yst_uid;
+	oh->yst_gid = obj->yst_gid;
+	oh->yst_atime = obj->yst_atime;
+	oh->yst_mtime = obj->yst_mtime;
+	oh->yst_ctime = obj->yst_ctime;
+	oh->yst_rdev = obj->yst_rdev;
+
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+	obj->yst_mtime = Y_CURRENT_TIME;
+	if (do_a)
+		obj->yst_atime = obj->yst_mtime;
+	if (do_c)
+		obj->yst_ctime = obj->yst_mtime;
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+	yaffs_load_current_time(obj, 1, 1);
+	obj->yst_rdev = rdev;
+	obj->yst_uid = uid;
+	obj->yst_gid = gid;
+}
+
+loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+	YCHAR *alias = NULL;
+	obj = yaffs_get_equivalent_obj(obj);
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		return obj->variant.file_variant.file_size;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		alias = obj->variant.symlink_variant.alias;
+		if (!alias)
+			return 0;
+		return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+	default:
+		return 0;
+	}
+}
+
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+	unsigned int valid = attr->ia_valid;
+
+	if (valid & ATTR_MODE)
+		obj->yst_mode = attr->ia_mode;
+	if (valid & ATTR_UID)
+		obj->yst_uid = attr->ia_uid;
+	if (valid & ATTR_GID)
+		obj->yst_gid = attr->ia_gid;
+
+	if (valid & ATTR_ATIME)
+		obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+	if (valid & ATTR_CTIME)
+		obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+	if (valid & ATTR_MTIME)
+		obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+	if (valid & ATTR_SIZE)
+		yaffs_resize_file(obj, attr->ia_size);
+
+	yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+	return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+	unsigned int valid = 0;
+
+	attr->ia_mode = obj->yst_mode;
+	valid |= ATTR_MODE;
+	attr->ia_uid = obj->yst_uid;
+	valid |= ATTR_UID;
+	attr->ia_gid = obj->yst_gid;
+	valid |= ATTR_GID;
+
+	Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+	valid |= ATTR_ATIME;
+	Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+	valid |= ATTR_CTIME;
+	Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+	valid |= ATTR_MTIME;
+
+	attr->ia_size = yaffs_get_file_size(obj);
+	valid |= ATTR_SIZE;
+
+	attr->ia_valid = valid;
+
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
new file mode 100644
index 0000000..33d541d
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+#endif
diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
new file mode 100644
index 0000000..7df42cd
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,98 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+	if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"BlockBits block %d is not valid",
+			blk);
+		YBUG();
+	}
+	return dev->chunk_bits +
+	    (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+	if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+	    chunk < 0 || chunk >= dev->param.chunks_per_block) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Chunk Id (%d:%d) invalid",
+			blk, chunk);
+		YBUG();
+	}
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+	memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+	yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+	blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+	yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+	blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+	yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+	return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+	int i;
+	for (i = 0; i < dev->chunk_bit_stride; i++) {
+		if (*blk_bits)
+			return 1;
+		blk_bits++;
+	}
+	return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+	int i;
+	int n = 0;
+
+	for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+		n += hweight8(*blk_bits);
+
+	return n;
+}
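+
+/*
+ * Rough usage sketch (dev, blk and chunk are illustrative values, not
+ * taken from a real mount):
+ *
+ *	yaffs_set_chunk_bit(dev, blk, chunk);	(chunk now holds live data)
+ *	yaffs_check_chunk_bit(dev, blk, chunk);	(returns 1)
+ *	yaffs_clear_chunk_bit(dev, blk, chunk);	(e.g. after deletion)
+ *
+ * Each block owns chunk_bit_stride bytes of the bitmap, one bit per chunk,
+ * so chunk n of a block lives at bit (n & 7) of byte n / 8.
+ */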
diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
new file mode 100644
index 0000000..cf9ea58
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 0000000..4e40f43
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,415 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+	int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpt blocks_avail = %d", blocks_avail);
+
+	return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+	int i;
+
+	if (!dev->param.erase_fn)
+		return 0;
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checking blocks %d to %d",
+		dev->internal_start_block, dev->internal_end_block);
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+		if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"erasing checkpt block %d", i);
+
+			dev->n_erasures++;
+
+			if (dev->param.
+			    erase_fn(dev,
+				     i - dev->block_offset /* realign */ )) {
+				bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+				dev->n_erased_blocks++;
+				dev->n_free_chunks +=
+				    dev->param.chunks_per_block;
+			} else {
+				dev->param.bad_block_fn(dev, i);
+				bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+			}
+		}
+	}
+
+	dev->blocks_in_checkpt = 0;
+
+	return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+	int i;
+	int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"allocating checkpt block: erased %d reserved %d avail %d next %d ",
+		dev->n_erased_blocks, dev->param.n_reserved_blocks,
+		blocks_avail, dev->checkpt_next_block);
+
+	if (dev->checkpt_next_block >= 0 &&
+	    dev->checkpt_next_block <= dev->internal_end_block &&
+	    blocks_avail > 0) {
+
+		for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+		     i++) {
+			struct yaffs_block_info *bi =
+			    yaffs_get_block_info(dev, i);
+			if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+				dev->checkpt_next_block = i + 1;
+				dev->checkpt_cur_block = i;
+				yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+					"allocating checkpt block %d", i);
+				return;
+			}
+		}
+	}
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+	dev->checkpt_next_block = -1;
+	dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+	int i;
+	struct yaffs_ext_tags tags;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"find next checkpt block: start:  blocks %d next %d",
+		dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+	if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+		for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+		     i++) {
+			int chunk = i * dev->param.chunks_per_block;
+			int realigned_chunk = chunk - dev->chunk_offset;
+
+			dev->param.read_chunk_tags_fn(dev, realigned_chunk,
+						      NULL, &tags);
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+				"find next checkpt block: search: block %d oid %d seq %d eccr %d",
+				i, tags.obj_id, tags.seq_number,
+				tags.ecc_result);
+
+			if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+				/* Right kind of block */
+				dev->checkpt_next_block = tags.obj_id;
+				dev->checkpt_cur_block = i;
+				dev->checkpt_block_list[dev->
+							blocks_in_checkpt] = i;
+				dev->blocks_in_checkpt++;
+				yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+					"found checkpt block %d", i);
+				return;
+			}
+		}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+	dev->checkpt_next_block = -1;
+	dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+
+	dev->checkpt_open_write = writing;
+
+	/* Got the functions we need? */
+	if (!dev->param.write_chunk_tags_fn ||
+	    !dev->param.read_chunk_tags_fn ||
+	    !dev->param.erase_fn || !dev->param.bad_block_fn)
+		return 0;
+
+	if (writing && !yaffs2_checkpt_space_ok(dev))
+		return 0;
+
+	if (!dev->checkpt_buffer)
+		dev->checkpt_buffer =
+		    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+	if (!dev->checkpt_buffer)
+		return 0;
+
+	dev->checkpt_page_seq = 0;
+	dev->checkpt_byte_count = 0;
+	dev->checkpt_sum = 0;
+	dev->checkpt_xor = 0;
+	dev->checkpt_cur_block = -1;
+	dev->checkpt_cur_chunk = -1;
+	dev->checkpt_next_block = dev->internal_start_block;
+
+	/* Erase all the blocks in the checkpoint area */
+	if (writing) {
+		memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+		dev->checkpt_byte_offs = 0;
+		return yaffs_checkpt_erase(dev);
+	} else {
+		int i;
+		/* Set to a value that will kick off a read */
+		dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+		/* A checkpoint block list of one checkpoint block per 16
+		 * blocks should be (hopefully) far more than we need */
+		dev->blocks_in_checkpt = 0;
+		dev->checkpt_max_blocks =
+		    (dev->internal_end_block - dev->internal_start_block) / 16 +
+		    2;
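+		/*
+		 * For example, a device with 1024 internal blocks would get
+		 * room for 1024/16 + 2 = 66 list entries; the figures are
+		 * illustrative only.
+		 */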
+		dev->checkpt_block_list =
+		    kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+		if (!dev->checkpt_block_list)
+			return 0;
+
+		for (i = 0; i < dev->checkpt_max_blocks; i++)
+			dev->checkpt_block_list[i] = -1;
+	}
+
+	return 1;
+}
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
+{
+	u32 composite_sum;
+	composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xFF);
+	*sum = composite_sum;
+	return 1;
+}
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+	int chunk;
+	int realigned_chunk;
+
+	struct yaffs_ext_tags tags;
+
+	if (dev->checkpt_cur_block < 0) {
+		yaffs2_checkpt_find_erased_block(dev);
+		dev->checkpt_cur_chunk = 0;
+	}
+
+	if (dev->checkpt_cur_block < 0)
+		return 0;
+
+	tags.is_deleted = 0;
+	tags.obj_id = dev->checkpt_next_block;	/* Hint to next place to look */
+	tags.chunk_id = dev->checkpt_page_seq + 1;
+	tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+	tags.n_bytes = dev->data_bytes_per_chunk;
+	if (dev->checkpt_cur_chunk == 0) {
+		/* First chunk we write for the block? Set block state to
+		   checkpoint */
+		struct yaffs_block_info *bi =
+		    yaffs_get_block_info(dev, dev->checkpt_cur_block);
+		bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+		dev->blocks_in_checkpt++;
+	}
+
+	chunk =
+	    dev->checkpt_cur_block * dev->param.chunks_per_block +
+	    dev->checkpt_cur_chunk;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpoint write buffer nand %d(%d:%d) objid %d chId %d",
+		chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+		tags.obj_id, tags.chunk_id);
+
+	realigned_chunk = chunk - dev->chunk_offset;
+
+	dev->n_page_writes++;
+
+	dev->param.write_chunk_tags_fn(dev, realigned_chunk,
+				       dev->checkpt_buffer, &tags);
+	dev->checkpt_byte_offs = 0;
+	dev->checkpt_page_seq++;
+	dev->checkpt_cur_chunk++;
+	if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+		dev->checkpt_cur_chunk = 0;
+		dev->checkpt_cur_block = -1;
+	}
+	memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+	return 1;
+}
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+	int i = 0;
+	int ok = 1;
+
+	u8 *data_bytes = (u8 *) data;
+
+	if (!dev->checkpt_buffer)
+		return 0;
+
+	if (!dev->checkpt_open_write)
+		return -1;
+
+	while (i < n_bytes && ok) {
+		dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+		dev->checkpt_sum += *data_bytes;
+		dev->checkpt_xor ^= *data_bytes;
+
+		dev->checkpt_byte_offs++;
+		i++;
+		data_bytes++;
+		dev->checkpt_byte_count++;
+
+		if (dev->checkpt_byte_offs < 0 ||
+		    dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+			ok = yaffs2_checkpt_flush_buffer(dev);
+	}
+
+	return i;
+}
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+	int i = 0;
+	int ok = 1;
+	struct yaffs_ext_tags tags;
+
+	int chunk;
+	int realigned_chunk;
+
+	u8 *data_bytes = (u8 *) data;
+
+	if (!dev->checkpt_buffer)
+		return 0;
+
+	if (dev->checkpt_open_write)
+		return -1;
+
+	while (i < n_bytes && ok) {
+
+		if (dev->checkpt_byte_offs < 0 ||
+		    dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+			if (dev->checkpt_cur_block < 0) {
+				yaffs2_checkpt_find_block(dev);
+				dev->checkpt_cur_chunk = 0;
+			}
+
+			if (dev->checkpt_cur_block < 0)
+				ok = 0;
+			else {
+				chunk = dev->checkpt_cur_block *
+				    dev->param.chunks_per_block +
+				    dev->checkpt_cur_chunk;
+
+				realigned_chunk = chunk - dev->chunk_offset;
+
+				dev->n_page_reads++;
+
+				/* read in the next chunk */
+				dev->param.read_chunk_tags_fn(dev,
+							      realigned_chunk,
+							      dev->
+							      checkpt_buffer,
+							      &tags);
+
+				if (tags.chunk_id != (dev->checkpt_page_seq + 1)
+				    || tags.ecc_result > YAFFS_ECC_RESULT_FIXED
+				    || tags.seq_number !=
+				    YAFFS_SEQUENCE_CHECKPOINT_DATA)
+					ok = 0;
+
+				dev->checkpt_byte_offs = 0;
+				dev->checkpt_page_seq++;
+				dev->checkpt_cur_chunk++;
+
+				if (dev->checkpt_cur_chunk >=
+				    dev->param.chunks_per_block)
+					dev->checkpt_cur_block = -1;
+			}
+		}
+
+		if (ok) {
+			*data_bytes =
+			    dev->checkpt_buffer[dev->checkpt_byte_offs];
+			dev->checkpt_sum += *data_bytes;
+			dev->checkpt_xor ^= *data_bytes;
+			dev->checkpt_byte_offs++;
+			i++;
+			data_bytes++;
+			dev->checkpt_byte_count++;
+		}
+	}
+
+	return i;
+}
+
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+
+	if (dev->checkpt_open_write) {
+		if (dev->checkpt_byte_offs != 0)
+			yaffs2_checkpt_flush_buffer(dev);
+	} else if (dev->checkpt_block_list) {
+		int i;
+		for (i = 0;
+		     i < dev->blocks_in_checkpt
+		     && dev->checkpt_block_list[i] >= 0; i++) {
+			int blk = dev->checkpt_block_list[i];
+			struct yaffs_block_info *bi = NULL;
+			if (dev->internal_start_block <= blk
+			    && blk <= dev->internal_end_block)
+				bi = yaffs_get_block_info(dev, blk);
+			if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+				bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+			else {
+				/* TODO: this looks odd... */
+			}
+		}
+		kfree(dev->checkpt_block_list);
+		dev->checkpt_block_list = NULL;
+	}
+
+	dev->n_free_chunks -=
+	    dev->blocks_in_checkpt * dev->param.chunks_per_block;
+	dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,"checkpoint byte count %d",
+		dev->checkpt_byte_count);
+
+	if (dev->checkpt_buffer) {
+		/* free the buffer */
+		kfree(dev->checkpt_buffer);
+		dev->checkpt_buffer = NULL;
+		return 1;
+	} else {
+		return 0;
+        }
+}
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+	/* Erase the checkpoint data */
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpoint invalidate of %d blocks",
+		dev->blocks_in_checkpt);
+
+	return yaffs_checkpt_erase(dev);
+}
diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 0000000..361c606
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
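+
+/*
+ * Rough usage sketch for saving a checkpoint stream (error handling and
+ * the actual checkpoint records are omitted; cp is an illustrative blob):
+ *
+ *	if (yaffs2_checkpt_open(dev, 1)) {
+ *		yaffs2_checkpt_wr(dev, &cp, sizeof(cp));
+ *		yaffs_checkpt_close(dev);
+ *	}
+ *
+ * Reading back uses yaffs2_checkpt_open(dev, 0) with yaffs2_checkpt_rd()
+ * in the same way.
+ */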
+
+#endif
diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 0000000..e95a806
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,298 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
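+
+/*
+ * A minimal usage sketch (page_data, stored_ecc and the error handler are
+ * illustrative names, not defined in this file): compute the ECC over a
+ * 256-byte block and try to repair a single-bit error against the ECC
+ * read back from the spare area:
+ *
+ *	unsigned char calc_ecc[3];
+ *
+ *	yaffs_ecc_cacl(page_data, calc_ecc);
+ *	if (yaffs_ecc_correct(page_data, stored_ecc, calc_ecc) < 0)
+ *		report_uncorrectable_ecc_error();
+ */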
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits 7..2.
+ * Bit 0 of each entry indicates whether the entry has odd or even parity, and
+ * therefore whether this byte influences the line parity.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+static const unsigned char column_parity_table[] = {
+	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc)
+{
+	unsigned int i;
+
+	unsigned char col_parity = 0;
+	unsigned char line_parity = 0;
+	unsigned char line_parity_prime = 0;
+	unsigned char t;
+	unsigned char b;
+
+	for (i = 0; i < 256; i++) {
+		b = column_parity_table[*data++];
+		col_parity ^= b;
+
+		if (b & 0x01) {	/* odd number of bits in the byte */
+			line_parity ^= i;
+			line_parity_prime ^= ~i;
+		}
+	}
+
+	ecc[2] = (~col_parity) | 0x03;
+
+	t = 0;
+	if (line_parity & 0x80)
+		t |= 0x80;
+	if (line_parity_prime & 0x80)
+		t |= 0x40;
+	if (line_parity & 0x40)
+		t |= 0x20;
+	if (line_parity_prime & 0x40)
+		t |= 0x10;
+	if (line_parity & 0x20)
+		t |= 0x08;
+	if (line_parity_prime & 0x20)
+		t |= 0x04;
+	if (line_parity & 0x10)
+		t |= 0x02;
+	if (line_parity_prime & 0x10)
+		t |= 0x01;
+	ecc[1] = ~t;
+
+	t = 0;
+	if (line_parity & 0x08)
+		t |= 0x80;
+	if (line_parity_prime & 0x08)
+		t |= 0x40;
+	if (line_parity & 0x04)
+		t |= 0x20;
+	if (line_parity_prime & 0x04)
+		t |= 0x10;
+	if (line_parity & 0x02)
+		t |= 0x08;
+	if (line_parity_prime & 0x02)
+		t |= 0x04;
+	if (line_parity & 0x01)
+		t |= 0x02;
+	if (line_parity_prime & 0x01)
+		t |= 0x01;
+	ecc[0] = ~t;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+	/* Swap the bytes into the wrong order */
+	t = ecc[0];
+	ecc[0] = ecc[1];
+	ecc[1] = t;
+#endif
+}
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+		      const unsigned char *test_ecc)
+{
+	unsigned char d0, d1, d2;	/* deltas */
+
+	d0 = read_ecc[0] ^ test_ecc[0];
+	d1 = read_ecc[1] ^ test_ecc[1];
+	d2 = read_ecc[2] ^ test_ecc[2];
+
+	if ((d0 | d1 | d2) == 0)
+		return 0;	/* no error */
+
+	if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+	    ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+	    ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+		/* Single bit (recoverable) error in data */
+
+		unsigned byte;
+		unsigned bit;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+		/* swap the bytes to correct for the wrong order */
+		unsigned char t;
+
+		t = d0;
+		d0 = d1;
+		d1 = t;
+#endif
+
+		bit = byte = 0;
+
+		if (d1 & 0x80)
+			byte |= 0x80;
+		if (d1 & 0x20)
+			byte |= 0x40;
+		if (d1 & 0x08)
+			byte |= 0x20;
+		if (d1 & 0x02)
+			byte |= 0x10;
+		if (d0 & 0x80)
+			byte |= 0x08;
+		if (d0 & 0x20)
+			byte |= 0x04;
+		if (d0 & 0x08)
+			byte |= 0x02;
+		if (d0 & 0x02)
+			byte |= 0x01;
+
+		if (d2 & 0x80)
+			bit |= 0x04;
+		if (d2 & 0x20)
+			bit |= 0x02;
+		if (d2 & 0x08)
+			bit |= 0x01;
+
+		data[byte] ^= (1 << bit);
+
+		return 1;	/* Corrected the error */
+	}
+
+	if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+		/* Recoverable error in ECC */
+
+		read_ecc[0] = test_ecc[0];
+		read_ecc[1] = test_ecc[1];
+		read_ecc[2] = test_ecc[2];
+
+		return 1;	/* Corrected the error */
+	}
+
+	/* Unrecoverable error */
+
+	return -1;
+
+}
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+			  struct yaffs_ecc_other *ecc_other)
+{
+	unsigned int i;
+
+	unsigned char col_parity = 0;
+	unsigned line_parity = 0;
+	unsigned line_parity_prime = 0;
+	unsigned char b;
+
+	for (i = 0; i < n_bytes; i++) {
+		b = column_parity_table[*data++];
+		col_parity ^= b;
+
+		if (b & 0x01) {
+			/* odd number of bits in the byte */
+			line_parity ^= i;
+			line_parity_prime ^= ~i;
+		}
+
+	}
+
+	ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+	ecc_other->line_parity = line_parity;
+	ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+			    struct yaffs_ecc_other *read_ecc,
+			    const struct yaffs_ecc_other *test_ecc)
+{
+	unsigned char delta_col;	/* column parity delta */
+	unsigned delta_line;	/* line parity delta */
+	unsigned delta_line_prime;	/* line parity delta */
+	unsigned bit;
+
+	delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+	delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+	delta_line_prime =
+	    read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+	if ((delta_col | delta_line | delta_line_prime) == 0)
+		return 0;	/* no error */
+
+	if (delta_line == ~delta_line_prime &&
+	    (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+		/* Single bit (recoverable) error in data */
+
+		bit = 0;
+
+		if (delta_col & 0x20)
+			bit |= 0x04;
+		if (delta_col & 0x08)
+			bit |= 0x02;
+		if (delta_col & 0x02)
+			bit |= 0x01;
+
+		if (delta_line >= n_bytes)
+			return -1;
+
+		data[delta_line] ^= (1 << bit);
+
+		return 1;	/* corrected */
+	}
+
+	if ((hweight32(delta_line) +
+	     hweight32(delta_line_prime) +
+	     hweight8(delta_col)) == 1) {
+		/* Recoverable error in ECC */
+
+		*read_ecc = *test_ecc;
+		return 1;	/* corrected */
+	}
+
+	/* Unrecoverable error */
+
+	return -1;
+}
diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 0000000..b0c461d
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+	unsigned char col_parity;
+	unsigned line_parity;
+	unsigned line_parity_prime;
+};
+
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+		      const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+			  struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+			    struct yaffs_ecc_other *read_ecc,
+			    const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100644
index 0000000..d87acbd
--- /dev/null
+++ b/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+							      *dev, int blk)
+{
+	if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>> yaffs: get_block_info block %d is not valid",
+			blk);
+		YBUG();
+	}
+	return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 0000000..f4ae9de
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5164 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_tagsvalidity.h"
+#include "yaffs_getblockinfo.h"
+
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_nand.h"
+
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+
+#include "yaffs_attribs.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+			     const u8 * buffer, int n_bytes, int use_reserve);
+
+
+
+/* Function to calculate chunk and offset */
+
+static void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+				int *chunk_out, u32 * offset_out)
+{
+	int chunk;
+	u32 offset;
+
+	chunk = (u32) (addr >> dev->chunk_shift);
+
+	if (dev->chunk_div == 1) {
+		/* easy power of 2 case */
+		offset = (u32) (addr & dev->chunk_mask);
+	} else {
+		/* Non power-of-2 case */
+
+		loff_t chunk_base;
+
+		chunk /= dev->chunk_div;
+
+		chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+		offset = (u32) (addr - chunk_base);
+	}
+
+	*chunk_out = chunk;
+	*offset_out = offset;
+}
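+
+/*
+ * For example, with 2048-byte data chunks (the power-of-two case above:
+ * chunk_shift == 11, chunk_mask == 2047) an address of 5000 maps to
+ * chunk 2 with an offset of 904 bytes; the figures are illustrative only.
+ */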
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static u32 calc_shifts_ceiling(u32 x)
+{
+	int extra_bits;
+	int shifts;
+
+	shifts = extra_bits = 0;
+
+	while (x > 1) {
+		if (x & 1)
+			extra_bits++;
+		x >>= 1;
+		shifts++;
+	}
+
+	if (extra_bits)
+		shifts++;
+
+	return shifts;
+}
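+
+/*
+ * For example, calc_shifts_ceiling(2048) returns 11, while
+ * calc_shifts_ceiling(2100) returns 12 because 2^12 is the smallest power
+ * of two that is >= 2100.
+ */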
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static u32 calc_shifts(u32 x)
+{
+	u32 shifts;
+
+	shifts = 0;
+
+	if (!x)
+		return 0;
+
+	while (!(x & 1)) {
+		x >>= 1;
+		shifts++;
+	}
+
+	return shifts;
+}
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+	int i;
+	u8 *buf = (u8 *) 1;
+
+	memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+	for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+		dev->temp_buffer[i].line = 0;	/* not in use */
+		dev->temp_buffer[i].buffer = buf =
+		    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+	}
+
+	return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev, int line_no)
+{
+	int i, j;
+
+	dev->temp_in_use++;
+	if (dev->temp_in_use > dev->max_temp)
+		dev->max_temp = dev->temp_in_use;
+
+	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+		if (dev->temp_buffer[i].line == 0) {
+			dev->temp_buffer[i].line = line_no;
+			if ((i + 1) > dev->max_temp) {
+				dev->max_temp = i + 1;
+				for (j = 0; j <= i; j++)
+					dev->temp_buffer[j].max_line =
+					    dev->temp_buffer[j].line;
+			}
+
+			return dev->temp_buffer[i].buffer;
+		}
+	}
+
+	yaffs_trace(YAFFS_TRACE_BUFFERS,
+		"Out of temp buffers at line %d, other held by lines:",
+		line_no);
+	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+		yaffs_trace(YAFFS_TRACE_BUFFERS," %d", dev->temp_buffer[i].line);
+
+	/*
+	 * If we got here then we have to allocate an unmanaged one
+	 * This is not good.
+	 */
+
+	dev->unmanaged_buffer_allocs++;
+	return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no)
+{
+	int i;
+
+	dev->temp_in_use--;
+
+	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+		if (dev->temp_buffer[i].buffer == buffer) {
+			dev->temp_buffer[i].line = 0;
+			return;
+		}
+	}
+
+	if (buffer) {
+		/* assume it is an unmanaged one. */
+		yaffs_trace(YAFFS_TRACE_BUFFERS,
+		  "Releasing unmanaged temp buffer in line %d",
+		   line_no);
+		kfree(buffer);
+		dev->unmanaged_buffer_deallocs++;
+	}
+
+}
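+
+/*
+ * The usual borrowing pattern, as a sketch (__LINE__ identifies the
+ * borrower in the "out of temp buffers" trace above):
+ *
+ *	u8 *buf = yaffs_get_temp_buffer(dev, __LINE__);
+ *
+ *	(use buf as scratch space for up to one chunk of data)
+ *	yaffs_release_temp_buffer(dev, buf, __LINE__);
+ */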
+
+/*
+ * Determine if we have a managed buffer.
+ */
+int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 * buffer)
+{
+	int i;
+
+	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+		if (dev->temp_buffer[i].buffer == buffer)
+			return 1;
+	}
+
+	for (i = 0; i < dev->param.n_caches; i++) {
+		if (dev->cache[i].data == buffer)
+			return 1;
+	}
+
+	if (buffer == dev->checkpt_buffer)
+		return 1;
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+	  "yaffs: unmanaged buffer detected.");
+	return 0;
+}
+
+/*
+ * Functions for robustness handling. TODO
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+				     const u8 * data,
+				     const struct yaffs_ext_tags *tags)
+{
+	dev = dev;
+	nand_chunk = nand_chunk;
+	data = data;
+	tags = tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+				      const struct yaffs_ext_tags *tags)
+{
+	dev = dev;
+	nand_chunk = nand_chunk;
+	tags = tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+			      struct yaffs_block_info *bi)
+{
+	if (!bi->gc_prioritise) {
+		bi->gc_prioritise = 1;
+		dev->has_pending_prioritised_gc = 1;
+		bi->chunk_error_strikes++;
+
+		if (bi->chunk_error_strikes > 3) {
+			bi->needs_retiring = 1;	/* Too many strikes, so retire this block */
+			yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs: Block struck out");
+
+		}
+	}
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+					int erased_ok)
+{
+	int flash_block = nand_chunk / dev->param.chunks_per_block;
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+	yaffs_handle_chunk_error(dev, bi);
+
+	if (erased_ok) {
+		/* Was an actual write failure, so mark the block for retirement  */
+		bi->needs_retiring = 1;
+		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+		  "**>> Block %d needs retiring", flash_block);
+	}
+
+	/* Delete the chunk */
+	yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+	yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ *  Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+	n = abs(n);
+	return n % YAFFS_NOBJECT_BUCKETS;
+}
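+
+/*
+ * With the usual YAFFS_NOBJECT_BUCKETS of 256 (an assumption here, the
+ * constant is not defined in this file) this simply folds the object id
+ * into a bucket, e.g. obj_id 257 hashes to bucket 1.
+ */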
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+	return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+	return dev->lost_n_found;
+}
+
+/*
+ *  Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 * buffer, int n_bytes)
+{
+	/* Horrible, slow implementation */
+	while (n_bytes--) {
+		if (*buffer != 0xFF)
+			return 0;
+		buffer++;
+	}
+	return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+	int retval = YAFFS_OK;
+	u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
+	struct yaffs_ext_tags tags;
+	int result;
+
+	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+	if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+		retval = YAFFS_FAIL;
+
+	if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+		tags.chunk_used) {
+		yaffs_trace(YAFFS_TRACE_NANDACCESS, "Chunk %d not erased", nand_chunk);
+		retval = YAFFS_FAIL;
+	}
+
+	yaffs_release_temp_buffer(dev, data, __LINE__);
+
+	return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+				      int nand_chunk,
+				      const u8 * data,
+				      struct yaffs_ext_tags *tags)
+{
+	int retval = YAFFS_OK;
+	struct yaffs_ext_tags temp_tags;
+	u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+	int result;
+
+	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+	if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+	    temp_tags.obj_id != tags->obj_id ||
+	    temp_tags.chunk_id != tags->chunk_id ||
+	    temp_tags.n_bytes != tags->n_bytes)
+		retval = YAFFS_FAIL;
+
+	yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+	return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+	int reserved_chunks;
+	int reserved_blocks = dev->param.n_reserved_blocks;
+	int checkpt_blocks;
+
+	checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+	reserved_chunks =
+	    ((reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block);
+
+	return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
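+
+/*
+ * For example (illustrative numbers): with 5 reserved blocks, 3 blocks
+ * needed for the checkpoint and 64 chunks per block, an allocation of
+ * n_chunks is only allowed while more than 8 * 64 + n_chunks = 512 +
+ * n_chunks free chunks remain.
+ */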
+
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+	int i;
+
+	struct yaffs_block_info *bi;
+
+	if (dev->n_erased_blocks < 1) {
+		/* Hoosterman we've got a problem.
+		 * Can't get space to gc
+		 */
+		yaffs_trace(YAFFS_TRACE_ERROR,
+		  "yaffs tragedy: no more erased blocks" );
+
+		return -1;
+	}
+
+	/* Find an empty block. */
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		dev->alloc_block_finder++;
+		if (dev->alloc_block_finder < dev->internal_start_block
+		    || dev->alloc_block_finder > dev->internal_end_block) {
+			dev->alloc_block_finder = dev->internal_start_block;
+		}
+
+		bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+		if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+			bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+			dev->seq_number++;
+			bi->seq_number = dev->seq_number;
+			dev->n_erased_blocks--;
+			yaffs_trace(YAFFS_TRACE_ALLOCATE,
+			  "Allocated block %d, seq  %d, %d left" ,
+			   dev->alloc_block_finder, dev->seq_number,
+			   dev->n_erased_blocks);
+			return dev->alloc_block_finder;
+		}
+	}
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs tragedy: no more erased blocks, but there should have been %d",
+		dev->n_erased_blocks);
+
+	return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+			     struct yaffs_block_info **block_ptr)
+{
+	int ret_val;
+	struct yaffs_block_info *bi;
+
+	if (dev->alloc_block < 0) {
+		/* Get next block to allocate off */
+		dev->alloc_block = yaffs_find_alloc_block(dev);
+		dev->alloc_page = 0;
+	}
+
+	if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+		/* Not enough space to allocate unless we're allowed to use the reserve. */
+		return -1;
+	}
+
+	if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+	    && dev->alloc_page == 0)
+		yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+	/* Next page please.... */
+	if (dev->alloc_block >= 0) {
+		bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+		ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+		    dev->alloc_page;
+		bi->pages_in_use++;
+		yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+		dev->alloc_page++;
+
+		dev->n_free_chunks--;
+
+		/* If the block is full set the state to full */
+		if (dev->alloc_page >= dev->param.chunks_per_block) {
+			bi->block_state = YAFFS_BLOCK_STATE_FULL;
+			dev->alloc_block = -1;
+		}
+
+		if (block_ptr)
+			*block_ptr = bi;
+
+		return ret_val;
+	}
+
+	yaffs_trace(YAFFS_TRACE_ERROR, "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" );
+
+	return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+	int n;
+
+	n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+	if (dev->alloc_block > 0)
+		n += (dev->param.chunks_per_block - dev->alloc_page);
+
+	return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+	if (dev->alloc_block > 0) {
+		struct yaffs_block_info *bi =
+		    yaffs_get_block_info(dev, dev->alloc_block);
+		if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+			bi->block_state = YAFFS_BLOCK_STATE_FULL;
+			dev->alloc_block = -1;
+		}
+	}
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+				 const u8 * data,
+				 struct yaffs_ext_tags *tags, int use_reserver)
+{
+	int attempts = 0;
+	int write_ok = 0;
+	int chunk;
+
+	yaffs2_checkpt_invalidate(dev);
+
+	do {
+		struct yaffs_block_info *bi = 0;
+		int erased_ok = 0;
+
+		chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+		if (chunk < 0) {
+			/* no space */
+			break;
+		}
+
+		/* First check this chunk is erased, if it needs
+		 * checking.  The checking policy (unless forced
+		 * always on) is as follows:
+		 *
+		 * Check the first page we try to write in a block.
+		 * If the check passes then we don't need to check any
+		 * more.        If the check fails, we check again...
+		 * If the block has been erased, we don't need to check.
+		 *
+		 * However, if the block has been prioritised for gc,
+		 * then we think there might be something odd about
+		 * this block and stop using it.
+		 *
+		 * Rationale: We should only ever see chunks that have
+		 * not been erased if there was a partially written
+		 * chunk due to power loss.  This checking policy should
+		 * catch that case with very few checks and thus save a
+		 * lot of checks that are most likely not needed.
+		 *
+		 * Mods to the above
+		 * If an erase check fails or the write fails we skip the 
+		 * rest of the block.
+		 */
+
+		/* let's give it a try */
+		attempts++;
+
+		if (dev->param.always_check_erased)
+			bi->skip_erased_check = 0;
+
+		if (!bi->skip_erased_check) {
+			erased_ok = yaffs_check_chunk_erased(dev, chunk);
+			if (erased_ok != YAFFS_OK) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+				  "**>> yaffs chunk %d was not erased",
+				  chunk);
+
+				/* If not erased, delete this one,
+				 * skip rest of block and
+				 * try another chunk */
+				yaffs_chunk_del(dev, chunk, 1, __LINE__);
+				yaffs_skip_rest_of_block(dev);
+				continue;
+			}
+		}
+
+		write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+		if (!bi->skip_erased_check)
+			write_ok =
+			    yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+		if (write_ok != YAFFS_OK) {
+			/* Clean up aborted write, skip to next block and
+			 * try another chunk */
+			yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+			continue;
+		}
+
+		bi->skip_erased_check = 1;
+
+		/* Copy the data into the robustification buffer */
+		yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+	} while (write_ok != YAFFS_OK &&
+		 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+	if (!write_ok)
+		chunk = -1;
+
+	if (attempts > 1) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>> yaffs write required %d attempts",
+			attempts);
+		dev->n_retired_writes += (attempts - 1);
+	}
+
+	return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+	yaffs2_checkpt_invalidate(dev);
+
+	yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+	if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+		if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"yaffs: Failed to mark bad and erase block %d",
+				flash_block);
+		} else {
+			struct yaffs_ext_tags tags;
+			int chunk_id =
+			    flash_block * dev->param.chunks_per_block;
+
+			u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+			memset(buffer, 0xff, dev->data_bytes_per_chunk);
+			yaffs_init_tags(&tags);
+			tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+			if (dev->param.write_chunk_tags_fn(dev, chunk_id -
+							   dev->chunk_offset,
+							   buffer,
+							   &tags) != YAFFS_OK)
+				yaffs_trace(YAFFS_TRACE_ALWAYS,
+					"yaffs: Failed to write bad block marker to block %d",
+					flash_block);
+
+			yaffs_release_temp_buffer(dev, buffer, __LINE__);
+		}
+	}
+
+	bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+	bi->gc_prioritise = 0;
+	bi->needs_retiring = 0;
+
+	dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static u16 yaffs_calc_name_sum(const YCHAR * name)
+{
+	u16 sum = 0;
+	u16 i = 1;
+
+	const YUCHAR *bname = (const YUCHAR *)name;
+	if (bname) {
+		while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH / 2))) {
+
+			/* 0x1f mask is case insensitive */
+			sum += ((*bname) & 0x1f) * i;
+			i++;
+			bname++;
+		}
+	}
+	return sum;
+}
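+
+/*
+ * Because of the 0x1f mask above the sum is case insensitive, e.g.
+ * "data.txt" and "DATA.TXT" give the same value, so a matching sum is
+ * only a hint and not proof that two names are equal.
+ */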
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+	memset(obj->short_name, 0, sizeof(obj->short_name));
+	if (name && 
+	        strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+	    YAFFS_SHORT_NAME_LENGTH)
+		strcpy(obj->short_name, name);
+	else
+		obj->short_name[0] = _Y('\0');
+#endif
+	obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+				const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+	YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+	memset(tmp_name, 0, sizeof(tmp_name));
+	yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+				YAFFS_MAX_NAME_LENGTH + 1);
+	yaffs_set_obj_name(obj, tmp_name);
+#else
+	yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+	struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+	if (tn) {
+		memset(tn, 0, dev->tnode_size);
+		dev->n_tnodes++;
+	}
+
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+
+	return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+	yaffs_free_raw_tnode(dev, tn);
+	dev->n_tnodes--;
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	yaffs_deinit_raw_tnodes_and_objs(dev);
+	dev->n_obj = 0;
+	dev->n_tnodes = 0;
+}
+
+void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+			unsigned pos, unsigned val)
+{
+	u32 *map = (u32 *) tn;
+	u32 bit_in_map;
+	u32 bit_in_word;
+	u32 word_in_map;
+	u32 mask;
+
+	pos &= YAFFS_TNODES_LEVEL0_MASK;
+	val >>= dev->chunk_grp_bits;
+
+	bit_in_map = pos * dev->tnode_width;
+	word_in_map = bit_in_map / 32;
+	bit_in_word = bit_in_map & (32 - 1);
+
+	mask = dev->tnode_mask << bit_in_word;
+
+	map[word_in_map] &= ~mask;
+	map[word_in_map] |= (mask & (val << bit_in_word));
+
+	if (dev->tnode_width > (32 - bit_in_word)) {
+		bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+		mask =
+		    dev->tnode_mask >> ( /*dev->tnode_width - */ bit_in_word);
+		map[word_in_map] &= ~mask;
+		map[word_in_map] |= (mask & (val >> bit_in_word));
+	}
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+			 unsigned pos)
+{
+	u32 *map = (u32 *) tn;
+	u32 bit_in_map;
+	u32 bit_in_word;
+	u32 word_in_map;
+	u32 val;
+
+	pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+	bit_in_map = pos * dev->tnode_width;
+	word_in_map = bit_in_map / 32;
+	bit_in_word = bit_in_map & (32 - 1);
+
+	val = map[word_in_map] >> bit_in_word;
+
+	if (dev->tnode_width > (32 - bit_in_word)) {
+		bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+		val |= (map[word_in_map] << bit_in_word);
+	}
+
+	val &= dev->tnode_mask;
+	val <<= dev->chunk_grp_bits;
+
+	return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
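+
+/*
+ * As a worked example (assuming the usual YAFFS_TNODES_LEVEL0_BITS of 4
+ * and YAFFS_TNODES_INTERNAL_BITS of 3): a level 0 tnode covers 16 chunks,
+ * so chunk_id 100 leaves 100 >> 4 = 6 to be resolved by internal tnodes,
+ * and since 6 >> 3 == 0 a tree with top_level 1 is already tall enough.
+ */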
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+				       struct yaffs_file_var *file_struct,
+				       u32 chunk_id)
+{
+	struct yaffs_tnode *tn = file_struct->top;
+	u32 i;
+	int required_depth;
+	int level = file_struct->top_level;
+
+	dev = dev;
+
+	/* Check sane level and chunk Id */
+	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+		return NULL;
+
+	if (chunk_id > YAFFS_MAX_CHUNK_ID)
+		return NULL;
+
+	/* First check we're tall enough (ie enough top_level) */
+
+	i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+	required_depth = 0;
+	while (i) {
+		i >>= YAFFS_TNODES_INTERNAL_BITS;
+		required_depth++;
+	}
+
+	if (required_depth > file_struct->top_level)
+		return NULL;	/* Not tall enough, so we can't find it */
+
+	/* Traverse down to level 0 */
+	while (level > 0 && tn) {
+		tn = tn->internal[(chunk_id >>
+				   (YAFFS_TNODES_LEVEL0_BITS +
+				    (level - 1) *
+				    YAFFS_TNODES_INTERNAL_BITS)) &
+				  YAFFS_TNODES_INTERNAL_MASK];
+		level--;
+	}
+
+	return tn;
+}
+
+/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
+ * This happens in two steps:
+ *  1. If the tree isn't tall enough, then make it taller.
+ *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ *  If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will
+ *  be plugged into the ttree.
+ */
+
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+					   struct yaffs_file_var *file_struct,
+					   u32 chunk_id,
+					   struct yaffs_tnode *passed_tn)
+{
+	int required_depth;
+	int i;
+	int l;
+	struct yaffs_tnode *tn;
+
+	u32 x;
+
+	/* Check sane level and page Id */
+	if (file_struct->top_level < 0
+	    || file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+		return NULL;
+
+	if (chunk_id > YAFFS_MAX_CHUNK_ID)
+		return NULL;
+
+	/* First check we're tall enough (ie enough top_level) */
+
+	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+	required_depth = 0;
+	while (x) {
+		x >>= YAFFS_TNODES_INTERNAL_BITS;
+		required_depth++;
+	}
+
+	if (required_depth > file_struct->top_level) {
+		/* Not tall enough, gotta make the tree taller */
+		for (i = file_struct->top_level; i < required_depth; i++) {
+
+			tn = yaffs_get_tnode(dev);
+
+			if (tn) {
+				tn->internal[0] = file_struct->top;
+				file_struct->top = tn;
+				file_struct->top_level++;
+			} else {
+				yaffs_trace(YAFFS_TRACE_ERROR, "yaffs: no more tnodes");
+				return NULL;
+			}
+		}
+	}
+
+	/* Traverse down to level 0, adding anything we need */
+
+	l = file_struct->top_level;
+	tn = file_struct->top;
+
+	if (l > 0) {
+		while (l > 0 && tn) {
+			x = (chunk_id >>
+			     (YAFFS_TNODES_LEVEL0_BITS +
+			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+			    YAFFS_TNODES_INTERNAL_MASK;
+
+			if ((l > 1) && !tn->internal[x]) {
+				/* Add missing non-level-zero tnode */
+				tn->internal[x] = yaffs_get_tnode(dev);
+				if (!tn->internal[x])
+					return NULL;
+			} else if (l == 1) {
+				/* Looking from level 1 at level 0 */
+				if (passed_tn) {
+					/* If we already have one, then release it. */
+					if (tn->internal[x])
+						yaffs_free_tnode(dev,
+								 tn->
+								 internal[x]);
+					tn->internal[x] = passed_tn;
+
+				} else if (!tn->internal[x]) {
+					/* Don't have one, none passed in */
+					tn->internal[x] = yaffs_get_tnode(dev);
+					if (!tn->internal[x])
+						return NULL;
+				}
+			}
+
+			tn = tn->internal[x];
+			l--;
+		}
+	} else {
+		/* top is level 0 */
+		if (passed_tn) {
+			memcpy(tn, passed_tn,
+			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+			yaffs_free_tnode(dev, passed_tn);
+		}
+	}
+
+	return tn;
+}
+
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+			    int chunk_obj)
+{
+	return (tags->chunk_id == chunk_obj &&
+		tags->obj_id == obj_id && !tags->is_deleted) ? 1 : 0;
+
+}
+
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+				     struct yaffs_ext_tags *tags, int obj_id,
+				     int inode_chunk)
+{
+	int j;
+
+	for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+		if (yaffs_check_chunk_bit
+		    (dev, the_chunk / dev->param.chunks_per_block,
+		     the_chunk % dev->param.chunks_per_block)) {
+
+			if (dev->chunk_grp_size == 1)
+				return the_chunk;
+			else {
+				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+							 tags);
+				if (yaffs_tags_match(tags, obj_id, inode_chunk)) {
+					/* found it; */
+					return the_chunk;
+				}
+			}
+		}
+		the_chunk++;
+	}
+	return -1;
+}
+
+static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+				    struct yaffs_ext_tags *tags)
+{
+	/* Get the Tnode, then get the level 0 chunk offset */
+	struct yaffs_tnode *tn;
+	int the_chunk = -1;
+	struct yaffs_ext_tags local_tags;
+	int ret_val = -1;
+
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!tags) {
+		/* Passed a NULL, so use our own tags space */
+		tags = &local_tags;
+	}
+
+	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+	if (tn) {
+		the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+		ret_val =
+		    yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+					      inode_chunk);
+	}
+	return ret_val;
+}
+
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+				     struct yaffs_ext_tags *tags)
+{
+	/* Get the Tnode, then get the level 0 chunk offset */
+	struct yaffs_tnode *tn;
+	int the_chunk = -1;
+	struct yaffs_ext_tags local_tags;
+
+	struct yaffs_dev *dev = in->my_dev;
+	int ret_val = -1;
+
+	if (!tags) {
+		/* Passed a NULL, so use our own tags space */
+		tags = &local_tags;
+	}
+
+	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+	if (tn) {
+
+		the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+		ret_val =
+		    yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+					      inode_chunk);
+
+		/* Delete the entry in the filestructure (if found) */
+		if (ret_val != -1)
+			yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+	}
+
+	return ret_val;
+}
+
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+			    int nand_chunk, int in_scan)
+{
+	/* NB in_scan is zero unless scanning.
+	 * For forward scanning, in_scan is > 0;
+	 * for backward scanning in_scan is < 0
+	 *
+	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+	 */
+
+	struct yaffs_tnode *tn;
+	struct yaffs_dev *dev = in->my_dev;
+	int existing_cunk;
+	struct yaffs_ext_tags existing_tags;
+	struct yaffs_ext_tags new_tags;
+	unsigned existing_serial, new_serial;
+
+	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+		/* Just ignore an attempt at putting a chunk into a non-file
+		 * during scanning. If it is not during scanning then
+		 * something went wrong!
+		 */
+		if (!in_scan) {
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"yaffs tragedy: attempt to put data chunk into a non-file"
+				);
+			YBUG();
+		}
+
+		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+		return YAFFS_OK;
+	}
+
+	tn = yaffs_add_find_tnode_0(dev,
+				    &in->variant.file_variant,
+				    inode_chunk, NULL);
+	if (!tn)
+		return YAFFS_FAIL;
+
+	if (!nand_chunk)
+		/* Dummy insert, bail now */
+		return YAFFS_OK;
+
+	existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+	if (in_scan != 0) {
+		/* If we're scanning then we need to test for duplicates
+		 * NB This does not need to be efficient since it should only ever
+		 * happen when the power fails during a write, then only one
+		 * chunk should ever be affected.
+		 *
+		 * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
+		 * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+		 */
+
+		if (existing_cunk > 0) {
+			/* NB Right now existing chunk will not be real chunk_id if the chunk group size > 1
+			 *    thus we have to do a FindChunkInFile to get the real chunk id.
+			 *
+			 * We have a duplicate now we need to decide which one to use:
+			 *
+			 * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
+			 * Forward scanning YAFFS2: The new one is what we use, dump the old one.
+			 * YAFFS1: Get both sets of tags and compare serial numbers.
+			 */
+
+			if (in_scan > 0) {
+				/* Only do this for forward scanning */
+				yaffs_rd_chunk_tags_nand(dev,
+							 nand_chunk,
+							 NULL, &new_tags);
+
+				/* Do a proper find */
+				existing_cunk =
+				    yaffs_find_chunk_in_file(in, inode_chunk,
+							     &existing_tags);
+			}
+
+			if (existing_cunk <= 0) {
+				/*Hoosterman - how did this happen? */
+
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"yaffs tragedy: existing chunk < 0 in scan"
+					);
+
+			}
+
+			/* NB The deleted flags should be false, otherwise the chunks will
+			 * not be loaded during a scan
+			 */
+
+			if (in_scan > 0) {
+				new_serial = new_tags.serial_number;
+				existing_serial = existing_tags.serial_number;
+			}
+
+			if ((in_scan > 0) &&
+			    (existing_chunk <= 0 ||
+			     ((existing_serial + 1) & 3) == new_serial)) {
+				/* Forward scanning.
+				 * Use new
+				 * Delete the old one and drop through to update the tnode
+				 */
+				yaffs_chunk_del(dev, existing_chunk, 1,
+						__LINE__);
+			} else {
+				/* Backward scanning or we want to use the existing one
+				 * Use existing.
+				 * Delete the new one and return early so that the tnode isn't changed
+				 */
+				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+				return YAFFS_OK;
+			}
+		}
+
+	}
+
+	if (existing_chunk == 0)
+		in->n_data_chunks++;
+
+	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+	return YAFFS_OK;
+}
+
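+/* yaffs_soft_del_chunk() soft deletes one chunk: the owning block's
+ * soft_del_pages count goes up and the chunk is counted as free, but nothing
+ * is written to NAND.
+ */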
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+	struct yaffs_block_info *the_block;
+	unsigned block_no;
+
+	yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+	block_no = chunk / dev->param.chunks_per_block;
+	the_block = yaffs_get_block_info(dev, block_no);
+	if (the_block) {
+		the_block->soft_del_pages++;
+		dev->n_free_chunks++;
+		yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+	}
+}
+
+/* yaffs_soft_del_worker scans backwards through the tnode tree and soft
+ * deletes all the chunks in the file.
+ * All soft deleting does is increment the block's softdelete count and pull
+ * the chunk out of the tnode.
+ * Thus, essentially this is the same as the delete worker except that the
+ * chunks are soft deleted.
+ */
+
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+				 u32 level, int chunk_offset)
+{
+	int i;
+	int the_chunk;
+	int all_done = 1;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (tn) {
+		if (level > 0) {
+
+			for (i = YAFFS_NTNODES_INTERNAL - 1;
+			     all_done && i >= 0; i--) {
+				if (tn->internal[i]) {
+					all_done =
+					    yaffs_soft_del_worker(in,
+						tn->internal[i], level - 1,
+						(chunk_offset <<
+						 YAFFS_TNODES_INTERNAL_BITS) +
+						i);
+					if (all_done) {
+						yaffs_free_tnode(dev,
+							tn->internal[i]);
+						tn->internal[i] = NULL;
+					} else {
+						/* Hoosterman... how could this happen? */
+					}
+				}
+			}
+			return (all_done) ? 1 : 0;
+		} else if (level == 0) {
+
+			for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+				the_chunk = yaffs_get_group_base(dev, tn, i);
+				if (the_chunk) {
+					/* Note this does not find the real chunk, only the chunk group.
+					 * We make an assumption that a chunk group is not larger than
+					 * a block.
+					 */
+					yaffs_soft_del_chunk(dev, the_chunk);
+					yaffs_load_tnode_0(dev, tn, i, 0);
+				}
+
+			}
+			return 1;
+
+		}
+
+	}
+
+	return 1;
+
+}
+
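+/* yaffs_remove_obj_from_dir() unhooks an object from its parent directory's
+ * list of children and clears its parent pointer.
+ */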
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	struct yaffs_obj *parent;
+
+	yaffs_verify_obj_in_dir(obj);
+	parent = obj->parent;
+
+	yaffs_verify_dir(parent);
+
+	if (dev && dev->param.remove_obj_fn)
+		dev->param.remove_obj_fn(obj);
+
+	list_del_init(&obj->siblings);
+	obj->parent = NULL;
+
+	yaffs_verify_dir(parent);
+}
+
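+/* yaffs_add_obj_to_dir() moves an object into the given directory, removing
+ * it from its current directory first. Objects added to the unlinked or
+ * deleted directories are marked unlinked and can no longer be renamed.
+ */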
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+	if (!directory) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: Trying to add an object to a null pointer directory"
+			);
+		YBUG();
+		return;
+	}
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: Trying to add an object to a non-directory"
+			);
+		YBUG();
+	}
+
+	if (obj->siblings.prev == NULL) {
+		/* Not initialised */
+		YBUG();
+	}
+
+	yaffs_verify_dir(directory);
+
+	yaffs_remove_obj_from_dir(obj);
+
+	/* Now add it */
+	list_add(&obj->siblings, &directory->variant.dir_variant.children);
+	obj->parent = directory;
+
+	if (directory == obj->my_dev->unlinked_dir
+	    || directory == obj->my_dev->del_dir) {
+		obj->unlinked = 1;
+		obj->my_dev->n_unlinked_files++;
+		obj->rename_allowed = 0;
+	}
+
+	yaffs_verify_dir(directory);
+	yaffs_verify_obj_in_dir(obj);
+}
+
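+/* yaffs_change_obj_name() renames an object and/or moves it to a new
+ * directory, then rewrites its object header. Duplicate names are only
+ * tolerated for unlink/delete moves, shadowing, or forced renames.
+ */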
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+				 struct yaffs_obj *new_dir,
+				 const YCHAR * new_name, int force, int shadows)
+{
+	int unlink_op;
+	int del_op;
+
+	struct yaffs_obj *existing_target;
+
+	if (new_dir == NULL)
+		new_dir = obj->parent;	/* use the old directory */
+
+	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
+			);
+		YBUG();
+	}
+
+	/* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+	if (obj->my_dev->param.is_yaffs2)
+		unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+	else
+		unlink_op = (new_dir == obj->my_dev->unlinked_dir
+			     && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
+
+	del_op = (new_dir == obj->my_dev->del_dir);
+
+	existing_target = yaffs_find_by_name(new_dir, new_name);
+
+	/* If the object is a file going into the unlinked directory, then it
+	 * is OK to just stuff it in since duplicate names are allowed.
+	 * Otherwise only proceed if the new name does not exist and we're
+	 * putting it into a directory.
+	 */
+	if ((unlink_op ||
+	     del_op ||
+	     force ||
+	     (shadows > 0) ||
+	     !existing_target) &&
+	    new_dir->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_set_obj_name(obj, new_name);
+		obj->dirty = 1;
+
+		yaffs_add_obj_to_dir(new_dir, obj);
+
+		if (unlink_op)
+			obj->unlinked = 1;
+
+		/* If it is a deletion then we mark it as a shrink for gc purposes. */
+		if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >=
+		    0)
+			return YAFFS_OK;
+	}
+
+	return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ----------------------------------------
+ *   In many situations where there is no high level buffering a lot of
+ *   reads might be short sequential reads, and a lot of writes may be short
+ *   sequential writes, e.g. scanning/writing a jpeg file.
+ *   In these cases, a short read/write cache can provide a huge performance
+ *   benefit with dumb-as-a-rock code.
+ *   In Linux, the page cache provides read buffering and the short op cache
+ *   provides write buffering.
+ *
+ *   There are a limited number (~10) of cache chunks per device so that we
+ *   don't need a very intelligent search.
+ */
+
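+/* yaffs_obj_cache_dirty() returns 1 if the object has any dirty chunks in
+ * the short op cache, else 0.
+ */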
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int i;
+	struct yaffs_cache *cache;
+	int n_caches = obj->my_dev->param.n_caches;
+
+	for (i = 0; i < n_caches; i++) {
+		cache = &dev->cache[i];
+		if (cache->object == obj && cache->dirty)
+			return 1;
+	}
+
+	return 0;
+}
+
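+/* yaffs_flush_file_cache() writes out the object's dirty cached chunks,
+ * lowest chunk id first, releasing each cache entry as it goes.
+ */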
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int lowest = -99;	/* Stop compiler whining. */
+	int i;
+	struct yaffs_cache *cache;
+	int chunk_written = 0;
+	int n_caches = obj->my_dev->param.n_caches;
+
+	if (n_caches > 0) {
+		do {
+			cache = NULL;
+
+			/* Find the dirty cache for this object with the lowest chunk id. */
+			for (i = 0; i < n_caches; i++) {
+				if (dev->cache[i].object == obj &&
+				    dev->cache[i].dirty) {
+					if (!cache
+					    || dev->cache[i].chunk_id <
+					    lowest) {
+						cache = &dev->cache[i];
+						lowest = cache->chunk_id;
+					}
+				}
+			}
+
+			if (cache && !cache->locked) {
+				/* Write it out and free it up */
+
+				chunk_written =
+				    yaffs_wr_data_obj(cache->object,
+						      cache->chunk_id,
+						      cache->data,
+						      cache->n_bytes, 1);
+				cache->dirty = 0;
+				cache->object = NULL;
+			}
+
+		} while (cache && chunk_written > 0);
+
+		if (cache)
+			/* Hoosterman, disk full while writing cache out. */
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"yaffs tragedy: no space during cache write");
+
+	}
+
+}
+
+/* yaffs_flush_whole_cache(dev)
+ *
+ * Flush the write cache for every object that has dirty cached chunks.
+ */
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	int n_caches = dev->param.n_caches;
+	int i;
+
+	/* Find a dirty object in the cache and flush it...
+	 * until there are no further dirty objects.
+	 */
+	do {
+		obj = NULL;
+		for (i = 0; i < n_caches && !obj; i++) {
+			if (dev->cache[i].object && dev->cache[i].dirty)
+				obj = dev->cache[i].object;
+
+		}
+		if (obj)
+			yaffs_flush_file_cache(obj);
+
+	} while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one, flush it and look again.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+	int i;
+
+	if (dev->param.n_caches > 0) {
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (!dev->cache[i].object)
+				return &dev->cache[i];
+		}
+	}
+
+	return NULL;
+}
+
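+/* yaffs_grab_chunk_cache() returns a cache entry to (re)use: an empty one if
+ * available, otherwise the least recently used unlocked entry, flushing the
+ * owning object's cache first if necessary.
+ */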
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+	struct yaffs_cache *cache;
+	struct yaffs_obj *the_obj;
+	int usage;
+	int i;
+	int pushout;
+
+	if (dev->param.n_caches > 0) {
+		/* Try to find a non-dirty one... */
+
+		cache = yaffs_grab_chunk_worker(dev);
+
+		if (!cache) {
+			/* They were all in use: find the least recently used
+			 * object and flush its cache, then look again.
+			 * NB What's here is not very accurate: we actually
+			 * flush the object that owns the least recently used
+			 * page, not necessarily the least recently used
+			 * object.
+			 */
+
+			/* With locking we can't assume we can use entry zero */
+
+			the_obj = NULL;
+			usage = -1;
+			cache = NULL;
+			pushout = -1;
+
+			for (i = 0; i < dev->param.n_caches; i++) {
+				if (dev->cache[i].object &&
+				    !dev->cache[i].locked &&
+				    (dev->cache[i].last_use < usage
+				     || !cache)) {
+					usage = dev->cache[i].last_use;
+					the_obj = dev->cache[i].object;
+					cache = &dev->cache[i];
+					pushout = i;
+				}
+			}
+
+			if (!cache || cache->dirty) {
+				/* Flush and try again */
+				yaffs_flush_file_cache(the_obj);
+				cache = yaffs_grab_chunk_worker(dev);
+			}
+
+		}
+		return cache;
+	} else {
+		return NULL;
+        }
+}
+
+/* Find a cached chunk */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+						  int chunk_id)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int i;
+	if (dev->param.n_caches > 0) {
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (dev->cache[i].object == obj &&
+			    dev->cache[i].chunk_id == chunk_id) {
+				dev->cache_hits++;
+
+				return &dev->cache[i];
+			}
+		}
+	}
+	return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+			    int is_write)
+{
+
+	if (dev->param.n_caches > 0) {
+		if (dev->cache_last_use < 0 || dev->cache_last_use > 100000000) {
+			/* Reset the cache usages */
+			int i;
+			for (i = 1; i < dev->param.n_caches; i++)
+				dev->cache[i].last_use = 0;
+
+			dev->cache_last_use = 0;
+		}
+
+		dev->cache_last_use++;
+
+		cache->last_use = dev->cache_last_use;
+
+		if (is_write)
+			cache->dirty = 1;
+	}
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+	if (object->my_dev->param.n_caches > 0) {
+		struct yaffs_cache *cache =
+		    yaffs_find_chunk_cache(object, chunk_id);
+
+		if (cache)
+			cache->object = NULL;
+	}
+}
+
+/* Invalidate all the cache pages associated with this object.
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+	int i;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (dev->param.n_caches > 0) {
+		/* Invalidate it. */
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (dev->cache[i].object == in)
+				dev->cache[i].object = NULL;
+		}
+	}
+}
+
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+	int bucket;
+	struct yaffs_dev *dev = obj->my_dev;
+
+	/* If it is still linked into the bucket list, free from the list */
+	if (!list_empty(&obj->hash_link)) {
+		list_del_init(&obj->hash_link);
+		bucket = yaffs_hash_fn(obj->obj_id);
+		dev->obj_bucket[bucket].count--;
+	}
+}
+
+/* yaffs_free_obj frees up an object and puts it back on the free list. */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+
+	if (!obj) {
+		/* Check for NULL before obj is dereferenced below */
+		YBUG();
+		return;
+	}
+
+	dev = obj->my_dev;
+
+	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+		obj, obj->my_inode);
+
+	if (obj->parent)
+		YBUG();
+	if (!list_empty(&obj->siblings))
+		YBUG();
+
+	if (obj->my_inode) {
+		/* We're still hooked up to a cached inode.
+		 * Don't delete now, but mark for later deletion
+		 */
+		obj->defered_free = 1;
+		return;
+	}
+
+	yaffs_unhash_obj(obj);
+
+	yaffs_free_raw_obj(dev, obj);
+	dev->n_obj--;
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+}
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+	if (obj->defered_free)
+		yaffs_free_obj(obj);
+}
+
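+/* yaffs_generic_obj_del() deletes an object completely: cached data is
+ * invalidated, the object is (for YAFFS2) renamed into the deleted directory
+ * as a record, its header chunk is deleted and the object is freed.
+ */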
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+
+	/* First off, invalidate the file's data in the cache, without flushing. */
+	yaffs_invalidate_whole_cache(in);
+
+	if (in->my_dev->param.is_yaffs2 && (in->parent != in->my_dev->del_dir)) {
+		/* Move to the unlinked directory so we have a record that it was deleted. */
+		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+				      0);
+
+	}
+
+	yaffs_remove_obj_from_dir(in);
+	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+	in->hdr_chunk = 0;
+
+	yaffs_free_obj(in);
+	return YAFFS_OK;
+
+}
+
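+/* yaffs_soft_del_file() soft deletes the data chunks of a deleted file, or
+ * deletes the object outright if the file has no data chunks left.
+ */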
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+	if (obj->deleted &&
+	    obj->variant_type == YAFFS_OBJECT_TYPE_FILE && !obj->soft_del) {
+		if (obj->n_data_chunks <= 0) {
+			/* Empty file with no duplicate object headers,
+			 * just delete it immediately */
+			yaffs_free_tnode(obj->my_dev,
+					 obj->variant.file_variant.top);
+			obj->variant.file_variant.top = NULL;
+			yaffs_trace(YAFFS_TRACE_TRACING,
+				"yaffs: Deleting empty file %d",
+				obj->obj_id);
+			yaffs_generic_obj_del(obj);
+		} else {
+			yaffs_soft_del_worker(obj,
+					      obj->variant.file_variant.top,
+					      obj->variant.
+					      file_variant.top_level, 0);
+			obj->soft_del = 1;
+		}
+	}
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+					      struct yaffs_tnode *tn, u32 level,
+					      int del0)
+{
+	int i;
+	int has_data;
+
+	if (tn) {
+		has_data = 0;
+
+		if (level > 0) {
+			for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+				if (tn->internal[i]) {
+					tn->internal[i] =
+					    yaffs_prune_worker(dev,
+						tn->internal[i], level - 1,
+						(i == 0) ? del0 : 1);
+				}
+
+				if (tn->internal[i])
+					has_data++;
+			}
+		} else {
+			int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+			u32 *map = (u32 *) tn;
+
+			for (i = 0; !has_data && i < tnode_size_u32; i++) {
+				if (map[i])
+					has_data++;
+			}
+		}
+
+		if (has_data == 0 && del0) {
+			/* Free and return NULL */
+
+			yaffs_free_tnode(dev, tn);
+			tn = NULL;
+		}
+
+	}
+
+	return tn;
+
+}
+
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+			    struct yaffs_file_var *file_struct)
+{
+	int i;
+	int has_data;
+	int done = 0;
+	struct yaffs_tnode *tn;
+
+	if (file_struct->top_level > 0) {
+		file_struct->top =
+		    yaffs_prune_worker(dev, file_struct->top,
+				       file_struct->top_level, 0);
+
+		/* Now we have a tree with all the empty branches NULLed out,
+		 * but the height is the same as it was.
+		 * Let's see if we can trim internal tnodes to shorten the
+		 * tree.
+		 * We can do this if only the 0th element in the tnode is in
+		 * use (ie all the non-zero entries are NULL).
+		 */
+
+		while (file_struct->top_level && !done) {
+			tn = file_struct->top;
+
+			has_data = 0;
+			for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+				if (tn->internal[i])
+					has_data++;
+			}
+
+			if (!has_data) {
+				file_struct->top = tn->internal[0];
+				file_struct->top_level--;
+				yaffs_free_tnode(dev, tn);
+			} else {
+				done = 1;
+			}
+		}
+	}
+
+	return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* yaffs_alloc_empty_obj gets us a clean object. Tries to allocate more if we run out. */
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+	if (obj) {
+		dev->n_obj++;
+
+		/* Now sweeten it up... */
+
+		memset(obj, 0, sizeof(struct yaffs_obj));
+		obj->being_created = 1;
+
+		obj->my_dev = dev;
+		obj->hdr_chunk = 0;
+		obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+		INIT_LIST_HEAD(&(obj->hard_links));
+		INIT_LIST_HEAD(&(obj->hash_link));
+		INIT_LIST_HEAD(&obj->siblings);
+
+		/* Now make the directory sane */
+		if (dev->root_dir) {
+			obj->parent = dev->root_dir;
+			list_add(&(obj->siblings),
+				 &dev->root_dir->variant.dir_variant.children);
+		}
+
+		/* Add it to the lost and found directory.
+		 * NB Can't put root or lost-n-found in lost-n-found so
+		 * check if lost-n-found exists first
+		 */
+		if (dev->lost_n_found)
+			yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+		obj->being_created = 0;
+	}
+
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+
+	return obj;
+}
+
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+	int i;
+	int l = 999;
+	int lowest = 999999;
+
+	/* Search for the shortest list or one that
+	 * isn't too long.
+	 */
+
+	for (i = 0; i < 10 && lowest > 4; i++) {
+		dev->bucket_finder++;
+		dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+		if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+			lowest = dev->obj_bucket[dev->bucket_finder].count;
+			l = dev->bucket_finder;
+		}
+
+	}
+
+	return l;
+}
+
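+/* yaffs_new_obj_id() picks an object id that is not already in use and that
+ * hashes into a lightly loaded bucket.
+ */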
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+	int bucket = yaffs_find_nice_bucket(dev);
+
+	/* Now find an object value that has not already been taken
+	 * by scanning the list.
+	 */
+
+	int found = 0;
+	struct list_head *i;
+
+	u32 n = (u32) bucket;
+
+	/* yaffs_check_obj_hash_sane();  */
+
+	while (!found) {
+		found = 1;
+		n += YAFFS_NOBJECT_BUCKETS;
+		if (1 || dev->obj_bucket[bucket].count > 0) {
+			list_for_each(i, &dev->obj_bucket[bucket].list) {
+				/* If there is already one in the list */
+				if (i && list_entry(i, struct yaffs_obj,
+						    hash_link)->obj_id == n) {
+					found = 0;
+				}
+			}
+		}
+	}
+
+	return n;
+}
+
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+	int bucket = yaffs_hash_fn(in->obj_id);
+	struct yaffs_dev *dev = in->my_dev;
+
+	list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+	dev->obj_bucket[bucket].count++;
+}
+
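+/* yaffs_find_by_number() looks an object up by object id via the hash
+ * buckets. Objects that are awaiting a deferred free are hidden from the
+ * caller.
+ */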
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+	int bucket = yaffs_hash_fn(number);
+	struct list_head *i;
+	struct yaffs_obj *in;
+
+	list_for_each(i, &dev->obj_bucket[bucket].list) {
+		/* Look if it is in the list */
+		if (i) {
+			in = list_entry(i, struct yaffs_obj, hash_link);
+			if (in->obj_id == number) {
+
+				/* Don't tell the VFS about this one if its free has been deferred */
+				if (in->defered_free)
+					return NULL;
+
+				return in;
+			}
+		}
+	}
+
+	return NULL;
+}
+
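+/* yaffs_new_obj() creates a fresh in-memory object of the given type. If
+ * number is negative a new object id is allocated; files also get a
+ * top-level tnode.
+ */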
+struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+				enum yaffs_obj_type type)
+{
+	struct yaffs_obj *the_obj = NULL;
+	struct yaffs_tnode *tn = NULL;
+
+	if (number < 0)
+		number = yaffs_new_obj_id(dev);
+
+	if (type == YAFFS_OBJECT_TYPE_FILE) {
+		tn = yaffs_get_tnode(dev);
+		if (!tn)
+			return NULL;
+	}
+
+	the_obj = yaffs_alloc_empty_obj(dev);
+	if (!the_obj) {
+		if (tn)
+			yaffs_free_tnode(dev, tn);
+		return NULL;
+	}
+
+	if (the_obj) {
+		the_obj->fake = 0;
+		the_obj->rename_allowed = 1;
+		the_obj->unlink_allowed = 1;
+		the_obj->obj_id = number;
+		yaffs_hash_obj(the_obj);
+		the_obj->variant_type = type;
+		yaffs_load_current_time(the_obj, 1, 1);
+
+		switch (type) {
+		case YAFFS_OBJECT_TYPE_FILE:
+			the_obj->variant.file_variant.file_size = 0;
+			the_obj->variant.file_variant.scanned_size = 0;
+			the_obj->variant.file_variant.shrink_size = ~0;	/* max */
+			the_obj->variant.file_variant.top_level = 0;
+			the_obj->variant.file_variant.top = tn;
+			break;
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+			INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+			INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+			break;
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+			/* No action required */
+			break;
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+			/* TODO: this should not happen */
+			break;
+		}
+	}
+
+	return the_obj;
+}
+
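+/* yaffs_create_fake_dir() creates a directory (eg. root or lost-n-found)
+ * that has no object header on NAND and cannot be renamed or unlinked.
+ */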
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+					       int number, u32 mode)
+{
+
+	struct yaffs_obj *obj =
+	    yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+	if (obj) {
+		obj->fake = 1;	/* it is fake so it might have no NAND presence... */
+		obj->rename_allowed = 0;	/* ... and we're not allowed to rename it... */
+		obj->unlink_allowed = 0;	/* ... or unlink it */
+		obj->deleted = 0;
+		obj->unlinked = 0;
+		obj->yst_mode = mode;
+		obj->my_dev = dev;
+		obj->hdr_chunk = 0;	/* Not a valid chunk. */
+	}
+
+	return obj;
+
+}
+
+
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	int i;
+
+	dev->n_obj = 0;
+	dev->n_tnodes = 0;
+
+	yaffs_init_raw_tnodes_and_objs(dev);
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+		dev->obj_bucket[i].count = 0;
+	}
+}
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+						 int number,
+						 enum yaffs_obj_type type)
+{
+	struct yaffs_obj *the_obj = NULL;
+
+	if (number > 0)
+		the_obj = yaffs_find_by_number(dev, number);
+
+	if (!the_obj)
+		the_obj = yaffs_new_obj(dev, number, type);
+
+	return the_obj;
+
+}
+
+YCHAR *yaffs_clone_str(const YCHAR * str)
+{
+	YCHAR *new_str = NULL;
+	int len;
+
+	if (!str)
+		str = _Y("");
+
+	len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+	new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+	if (new_str) {
+		strncpy(new_str, str, len);
+		new_str[len] = 0;
+	}
+	return new_str;
+
+}
+/*
+ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
+ * new link (ie. name) is created or deleted in the directory.
+ *
+ * ie.
+ *   create dir/a : update dir's mtime/ctime
+ *   rm dir/a:   update dir's mtime/ctime
+ *   modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the
+ * number of updates when many files in a directory are changed within a
+ * brief period.
+ *
+ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
+
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+	if (!obj)
+		return;
+	dev = obj->my_dev;
+	obj->dirty = 1;
+	yaffs_load_current_time(obj, 0, 1);
+	if (dev->param.defered_dir_update) {
+		struct list_head *link = &obj->variant.dir_variant.dirty;
+
+		if (list_empty(link)) {
+			list_add(link, &dev->dirty_dirs);
+			yaffs_trace(YAFFS_TRACE_BACKGROUND,
+			  "Added object %d to dirty directories",
+			   obj->obj_id);
+		}
+
+	} else {
+		yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+        }
+}
+
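+/* yaffs_update_dirty_dirs() flushes deferred directory time updates by
+ * rewriting the object header of each directory on the dirty list.
+ */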
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+	struct list_head *link;
+	struct yaffs_obj *obj;
+	struct yaffs_dir_var *d_s;
+	union yaffs_obj_var *o_v;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+	while (!list_empty(&dev->dirty_dirs)) {
+		link = dev->dirty_dirs.next;
+		list_del_init(link);
+
+		d_s = list_entry(link, struct yaffs_dir_var, dirty);
+		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+		obj = list_entry(o_v, struct yaffs_obj, variant);
+
+		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+			obj->obj_id);
+
+		if (obj->dirty)
+			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+	}
+}
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+					  struct yaffs_obj *parent,
+					  const YCHAR * name,
+					  u32 mode,
+					  u32 uid,
+					  u32 gid,
+					  struct yaffs_obj *equiv_obj,
+					  const YCHAR * alias_str, u32 rdev)
+{
+	struct yaffs_obj *in;
+	YCHAR *str = NULL;
+
+	struct yaffs_dev *dev = parent->my_dev;
+
+	/* Check if the entry exists. If it does then fail the call since we don't want a dup. */
+	if (yaffs_find_by_name(parent, name))
+		return NULL;
+
+	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+		str = yaffs_clone_str(alias_str);
+		if (!str)
+			return NULL;
+	}
+
+	in = yaffs_new_obj(dev, -1, type);
+
+	if (!in) {
+		if (str)
+			kfree(str);
+		return NULL;
+	}
+
+	if (in) {
+		in->hdr_chunk = 0;
+		in->valid = 1;
+		in->variant_type = type;
+
+		in->yst_mode = mode;
+
+		yaffs_attribs_init(in, gid, uid, rdev);
+
+		in->n_data_chunks = 0;
+
+		yaffs_set_obj_name(in, name);
+		in->dirty = 1;
+
+		yaffs_add_obj_to_dir(parent, in);
+
+		in->my_dev = parent->my_dev;
+
+		switch (type) {
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+			in->variant.symlink_variant.alias = str;
+			break;
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+			in->variant.hardlink_variant.equiv_obj = equiv_obj;
+			in->variant.hardlink_variant.equiv_id =
+			    equiv_obj->obj_id;
+			list_add(&in->hard_links, &equiv_obj->hard_links);
+			break;
+		case YAFFS_OBJECT_TYPE_FILE:
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+			/* do nothing */
+			break;
+		}
+
+		if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+			/* Could not create the object header, fail the creation */
+			yaffs_del_obj(in);
+			in = NULL;
+		}
+
+		yaffs_update_parent(parent);
+	}
+
+	return in;
+}
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+				    const YCHAR * name, u32 mode, u32 uid,
+				    u32 gid)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+				uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+				   u32 mode, u32 uid, u32 gid)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+				mode, uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+				       const YCHAR * name, u32 mode, u32 uid,
+				       u32 gid, u32 rdev)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+				uid, gid, NULL, NULL, rdev);
+}
+
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+				       const YCHAR * name, u32 mode, u32 uid,
+				       u32 gid, const YCHAR * alias)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+				uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the object id of the equivalent object.*/
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+				 struct yaffs_obj *equiv_obj)
+{
+	/* Get the real object in case we were fed a hard link as an equivalent object */
+	equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+	if (yaffs_create_obj
+	    (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+	     equiv_obj, NULL, 0)) {
+		return equiv_obj;
+	} else {
+		return NULL;
+	}
+
+}
+
+
+
+/*------------------------- Block Management and Page Allocation ----------------*/
+
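+/* yaffs_init_blocks() allocates and zeroes the per-block info array and the
+ * chunk-in-use bitmaps, falling back from kmalloc to vmalloc if kmalloc
+ * fails.
+ */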
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+	dev->block_info = NULL;
+	dev->chunk_bits = NULL;
+
+	dev->alloc_block = -1;	/* force it to get a new one */
+
+	/* If the first allocation strategy fails, try the alternate one */
+	dev->block_info =
+		kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+	if (!dev->block_info) {
+		dev->block_info =
+		    vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+		dev->block_info_alt = 1;
+	} else {
+		dev->block_info_alt = 0;
+        }
+
+	if (dev->block_info) {
+		/* Set up dynamic blockinfo stuff. Round up bytes. */
+		dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+		dev->chunk_bits =
+			kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+		if (!dev->chunk_bits) {
+			dev->chunk_bits =
+			    vmalloc(dev->chunk_bit_stride * n_blocks);
+			dev->chunk_bits_alt = 1;
+		} else {
+			dev->chunk_bits_alt = 0;
+                }
+	}
+
+	if (dev->block_info && dev->chunk_bits) {
+		memset(dev->block_info, 0,
+		       n_blocks * sizeof(struct yaffs_block_info));
+		memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+		return YAFFS_OK;
+	}
+
+	return YAFFS_FAIL;
+}
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+	if (dev->block_info_alt && dev->block_info)
+		vfree(dev->block_info);
+	else if (dev->block_info)
+		kfree(dev->block_info);
+
+	dev->block_info_alt = 0;
+
+	dev->block_info = NULL;
+
+	if (dev->chunk_bits_alt && dev->chunk_bits)
+		vfree(dev->chunk_bits);
+	else if (dev->chunk_bits)
+		kfree(dev->chunk_bits);
+	dev->chunk_bits_alt = 0;
+	dev->chunk_bits = NULL;
+}
+
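+/* yaffs_block_became_dirty() is called when a block no longer has any chunks
+ * in use: a healthy block is erased and returned to the empty pool, while a
+ * block that needs retiring (or fails to erase) is retired.
+ */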
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+
+	int erased_ok = 0;
+
+	/* If the block is still healthy erase it and mark as clean.
+	 * If the block has had a data failure, then retire it.
+	 */
+
+	yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+		"yaffs_block_became_dirty block %d state %d %s",
+		block_no, bi->block_state,
+		(bi->needs_retiring) ? "needs retiring" : "");
+
+	yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+	bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+	/* If this is the block being garbage collected then stop gc'ing this block */
+	if (block_no == dev->gc_block)
+		dev->gc_block = 0;
+
+	/* If this block is currently the best candidate for gc then drop as a candidate */
+	if (block_no == dev->gc_dirtiest) {
+		dev->gc_dirtiest = 0;
+		dev->gc_pages_in_use = 0;
+	}
+
+	if (!bi->needs_retiring) {
+		yaffs2_checkpt_invalidate(dev);
+		erased_ok = yaffs_erase_block(dev, block_no);
+		if (!erased_ok) {
+			dev->n_erase_failures++;
+			yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+			  "**>> Erasure failed %d", block_no);
+		}
+	}
+
+	if (erased_ok &&
+	    ((yaffs_trace_mask & YAFFS_TRACE_ERASE)
+	     || !yaffs_skip_verification(dev))) {
+		int i;
+		for (i = 0; i < dev->param.chunks_per_block; i++) {
+			if (!yaffs_check_chunk_erased
+			    (dev, block_no * dev->param.chunks_per_block + i)) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					">>Block %d erasure supposedly OK, but chunk %d not erased",
+					block_no, i);
+			}
+		}
+	}
+
+	if (erased_ok) {
+		/* Clean it up... */
+		bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+		bi->seq_number = 0;
+		dev->n_erased_blocks++;
+		bi->pages_in_use = 0;
+		bi->soft_del_pages = 0;
+		bi->has_shrink_hdr = 0;
+		bi->skip_erased_check = 1;	/* Clean, so no need to check */
+		bi->gc_prioritise = 0;
+		yaffs_clear_chunk_bits(dev, block_no);
+
+		yaffs_trace(YAFFS_TRACE_ERASE,
+			"Erased block %d", block_no);
+	} else {
+		/* We lost a block of free space */
+		dev->n_free_chunks -= dev->param.chunks_per_block;
+		yaffs_retire_block(dev, block_no);
+		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+			"**>> Block %d retired", block_no);
+	}
+}
+
+
+
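+/* yaffs_gc_block() garbage collects one block: chunks still in use are
+ * copied to new locations, soft-deleted data is dropped, and the block is
+ * handed over for erasure once empty. When whole_block is zero only a few
+ * chunks are copied per call so collection can proceed incrementally.
+ */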
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+	int old_chunk;
+	int new_chunk;
+	int mark_flash;
+	int ret_val = YAFFS_OK;
+	int i;
+	int is_checkpt_block;
+	int matching_chunk;
+	int max_copies;
+
+	int chunks_before = yaffs_get_erased_chunks(dev);
+	int chunks_after;
+
+	struct yaffs_ext_tags tags;
+
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+	struct yaffs_obj *object;
+
+	is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+	yaffs_trace(YAFFS_TRACE_TRACING,
+		"Collecting block %d, in use %d, shrink %d, whole_block %d",
+		block, bi->pages_in_use, bi->has_shrink_hdr,
+		whole_block);
+
+	/*yaffs_verify_free_chunks(dev); */
+
+	if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+		bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can be erased */
+
+	dev->gc_disable = 1;
+
+	if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"Collecting block %d that has no chunks in use",
+		   	block);
+		yaffs_block_became_dirty(dev, block);
+	} else {
+
+		u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+		yaffs_verify_blk(dev, bi, block);
+
+		max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+		old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+		for ( /* init already done */ ;
+		     ret_val == YAFFS_OK &&
+		     dev->gc_chunk < dev->param.chunks_per_block &&
+		     (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+		     max_copies > 0; dev->gc_chunk++, old_chunk++) {
+			if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+
+				/* This page is in use and might need to be copied off */
+
+				max_copies--;
+
+				mark_flash = 1;
+
+				yaffs_init_tags(&tags);
+
+				yaffs_rd_chunk_tags_nand(dev, old_chunk,
+							 buffer, &tags);
+
+				object = yaffs_find_by_number(dev, tags.obj_id);
+
+				yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+					"Collecting chunk in block %d, %d %d %d ",
+					dev->gc_chunk, tags.obj_id,
+					tags.chunk_id, tags.n_bytes);
+
+				if (object && !yaffs_skip_verification(dev)) {
+					if (tags.chunk_id == 0)
+						matching_chunk =
+						    object->hdr_chunk;
+					else if (object->soft_del)
+						matching_chunk = old_chunk;	/* Defeat the test */
+					else
+						matching_chunk =
+						    yaffs_find_chunk_in_file
+						    (object, tags.chunk_id,
+						     NULL);
+
+					if (old_chunk != matching_chunk)
+						yaffs_trace(YAFFS_TRACE_ERROR,
+							"gc: page in gc mismatch: %d %d %d %d",
+							old_chunk,
+							matching_chunk,
+							tags.obj_id,
+						   	tags.chunk_id);
+
+				}
+
+				if (!object) {
+					yaffs_trace(YAFFS_TRACE_ERROR,
+						"page %d in gc has no object: %d %d %d ",
+						old_chunk,
+						tags.obj_id, tags.chunk_id,
+						tags.n_bytes);
+				}
+
+				if (object &&
+				    object->deleted &&
+				    object->soft_del && tags.chunk_id != 0) {
+					/* Data chunk in a soft-deleted file:
+					 * throw it away.
+					 * No need to copy this, just forget
+					 * about it and fix up the object.
+					 */
+
+					/* Free chunks already includes
+					 * soft-deleted chunks.
+					 * However, this chunk is soon going to
+					 * be really deleted, which will
+					 * increment free chunks.
+					 * We have to decrement free chunks so
+					 * this works out properly.
+					 */
+					dev->n_free_chunks--;
+					bi->soft_del_pages--;
+
+					object->n_data_chunks--;
+
+					if (object->n_data_chunks <= 0) {
+						/* Remember to clean up the object */
+						dev->gc_cleanup_list[dev->n_clean_ups] =
+						    tags.obj_id;
+						dev->n_clean_ups++;
+					}
+					mark_flash = 0;
+				} else if (0) {
+					/* Todo object && object->deleted && object->n_data_chunks == 0 */
+					/* Deleted object header with no data chunks.
+					 * Can be discarded and the file deleted.
+					 */
+					object->hdr_chunk = 0;
+					yaffs_free_tnode(object->my_dev,
+							 object->
+							 variant.file_variant.
+							 top);
+					object->variant.file_variant.top = NULL;
+					yaffs_generic_obj_del(object);
+
+				} else if (object) {
+					/* It's either a data chunk in a live file or
+					 * an ObjectHeader, so we're interested in it.
+					 * NB Need to keep the ObjectHeaders of deleted files
+					 * until the whole file has been deleted off
+					 */
+					tags.serial_number++;
+
+					dev->n_gc_copies++;
+
+					if (tags.chunk_id == 0) {
+						/* It is an object header.
+						 * We need to nuke the
+						 * shrink-header flags first.
+						 * Also need to clean up
+						 * shadowing.
+						 * We no longer want the
+						 * shrink_header flag since its
+						 * work is done, and if it is
+						 * left in place it will mess
+						 * up scanning.
+						 */
+
+						struct yaffs_obj_hdr *oh;
+						oh = (struct yaffs_obj_hdr *)
+						    buffer;
+
+						oh->is_shrink = 0;
+						tags.extra_is_shrink = 0;
+
+						oh->shadows_obj = 0;
+						oh->inband_shadowed_obj_id = 0;
+						tags.extra_shadows = 0;
+
+						/* Update file size */
+						if (object->variant_type ==
+						    YAFFS_OBJECT_TYPE_FILE) {
+							oh->file_size =
+							    object->variant.
+							    file_variant.
+							    file_size;
+							tags.extra_length =
+							    oh->file_size;
+						}
+
+						yaffs_verify_oh(object, oh,
+								&tags, 1);
+						new_chunk =
+						    yaffs_write_new_chunk(dev,
+							(u8 *) oh, &tags, 1);
+					} else {
+						new_chunk =
+						    yaffs_write_new_chunk(dev,
+							buffer, &tags, 1);
+					}
+
+					if (new_chunk < 0) {
+						ret_val = YAFFS_FAIL;
+					} else {
+
+						/* Ok, now fix up the Tnodes etc. */
+
+						if (tags.chunk_id == 0) {
+							/* It's a header */
+							object->hdr_chunk =
+							    new_chunk;
+							object->serial =
+							    tags.serial_number;
+						} else {
+							/* It's a data chunk */
+							int ok;
+							ok = yaffs_put_chunk_in_file(object, tags.chunk_id, new_chunk, 0);
+						}
+					}
+				}
+
+				if (ret_val == YAFFS_OK)
+					yaffs_chunk_del(dev, old_chunk,
+							mark_flash, __LINE__);
+
+			}
+		}
+
+		yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+	}
+
+	yaffs_verify_collected_blk(dev, bi, block);
+
+	if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+		/*
+		 * The gc did not complete. Set block state back to FULL
+		 * because checkpointing does not restore gc.
+		 */
+		bi->block_state = YAFFS_BLOCK_STATE_FULL;
+	} else {
+		/* The gc completed. */
+		/* Do any required cleanups */
+		for (i = 0; i < dev->n_clean_ups; i++) {
+			/* Time to delete the file too */
+			object =
+			    yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+			if (object) {
+				yaffs_free_tnode(dev,
+						 object->variant.
+						 file_variant.top);
+				object->variant.file_variant.top = NULL;
+				yaffs_trace(YAFFS_TRACE_GC,
+					"yaffs: About to finally delete object %d",
+					object->obj_id);
+				yaffs_generic_obj_del(object);
+				object->my_dev->n_deleted_files--;
+			}
+
+		}
+
+		chunks_after = yaffs_get_erased_chunks(dev);
+		if (chunks_before >= chunks_after)
+			yaffs_trace(YAFFS_TRACE_GC,
+				"gc did not increase free chunks before %d after %d",
+				chunks_before, chunks_after);
+		dev->gc_block = 0;
+		dev->gc_chunk = 0;
+		dev->n_clean_ups = 0;
+	}
+
+	dev->gc_disable = 0;
+
+	return ret_val;
+}
+
+/*
+ * yaffs_find_gc_block() is used to select the dirtiest block (or close
+ * enough) for garbage collection.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+				    int aggressive, int background)
+{
+	int i;
+	int iterations;
+	unsigned selected = 0;
+	int prioritised = 0;
+	int prioritised_exist = 0;
+	struct yaffs_block_info *bi;
+	int threshold;
+
+	/* First let's see if we need to grab a prioritised block */
+	if (dev->has_pending_prioritised_gc && !aggressive) {
+		dev->gc_dirtiest = 0;
+		bi = dev->block_info;
+		for (i = dev->internal_start_block;
+		     i <= dev->internal_end_block && !selected; i++) {
+
+			if (bi->gc_prioritise) {
+				prioritised_exist = 1;
+				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+				    yaffs_block_ok_for_gc(dev, bi)) {
+					selected = i;
+					prioritised = 1;
+				}
+			}
+			bi++;
+		}
+
+		/*
+		 * If there is a prioritised block and none was selected then
+		 * this happened because there is at least one old dirty block gumming
+		 * up the works. Let's gc the oldest dirty block.
+		 */
+
+		if (prioritised_exist &&
+		    !selected && dev->oldest_dirty_block > 0)
+			selected = dev->oldest_dirty_block;
+
+		if (!prioritised_exist)	/* None found, so we can clear this */
+			dev->has_pending_prioritised_gc = 0;
+	}
+
+	/* If we're doing aggressive GC then we are happy to take a less-dirty
+	 * block, and search harder.
+	 * Otherwise (we're doing a leisurely gc) we only bother to do this if
+	 * the block has only a few pages in use.
+	 */
+
+	if (!selected) {
+		int pages_used;
+		int n_blocks =
+		    dev->internal_end_block - dev->internal_start_block + 1;
+		if (aggressive) {
+			threshold = dev->param.chunks_per_block;
+			iterations = n_blocks;
+		} else {
+			int max_threshold;
+
+			if (background)
+				max_threshold = dev->param.chunks_per_block / 2;
+			else
+				max_threshold = dev->param.chunks_per_block / 8;
+
+			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+			threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+			if (threshold > max_threshold)
+				threshold = max_threshold;
+
+			iterations = n_blocks / 16 + 1;
+			if (iterations > 100)
+				iterations = 100;
+		}
+
+		for (i = 0;
+		     i < iterations &&
+		     (dev->gc_dirtiest < 1 ||
+		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); i++) {
+			dev->gc_block_finder++;
+			if (dev->gc_block_finder < dev->internal_start_block ||
+			    dev->gc_block_finder > dev->internal_end_block)
+				dev->gc_block_finder =
+				    dev->internal_start_block;
+
+			bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+			pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+			    pages_used < dev->param.chunks_per_block &&
+			    (dev->gc_dirtiest < 1
+			     || pages_used < dev->gc_pages_in_use)
+			    && yaffs_block_ok_for_gc(dev, bi)) {
+				dev->gc_dirtiest = dev->gc_block_finder;
+				dev->gc_pages_in_use = pages_used;
+			}
+		}
+
+		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+			selected = dev->gc_dirtiest;
+	}
+
+	/*
+	 * If nothing has been selected for a while, try selecting the oldest dirty
+	 * because that's gumming up the works.
+	 */
+
+	if (!selected && dev->param.is_yaffs2 &&
+	    dev->gc_not_done >= (background ? 10 : 20)) {
+		yaffs2_find_oldest_dirty_seq(dev);
+		if (dev->oldest_dirty_block > 0) {
+			selected = dev->oldest_dirty_block;
+			dev->gc_dirtiest = selected;
+			dev->oldest_dirty_gc_count++;
+			bi = yaffs_get_block_info(dev, selected);
+			dev->gc_pages_in_use =
+			    bi->pages_in_use - bi->soft_del_pages;
+		} else {
+			dev->gc_not_done = 0;
+                }
+	}
+
+	if (selected) {
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC Selected block %d with %d free, prioritised:%d",
+			selected,
+			dev->param.chunks_per_block - dev->gc_pages_in_use,
+			prioritised);
+
+		dev->n_gc_blocks++;
+		if (background)
+			dev->bg_gcs++;
+
+		dev->gc_dirtiest = 0;
+		dev->gc_pages_in_use = 0;
+		dev->gc_not_done = 0;
+		if (dev->refresh_skip > 0)
+			dev->refresh_skip--;
+	} else {
+		dev->gc_not_done++;
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+			dev->gc_block_finder, dev->gc_not_done, threshold,
+			dev->gc_dirtiest, dev->gc_pages_in_use,
+			dev->oldest_dirty_block, background ? " bg" : "");
+	}
+
+	return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leasurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and will only accept more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+	int aggressive = 0;
+	int gc_ok = YAFFS_OK;
+	int max_tries = 0;
+	int min_erased;
+	int erased_chunks;
+	int checkpt_block_adjust;
+
+	if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
+		return YAFFS_OK;
+
+	if (dev->gc_disable) {
+		/* Bail out so we don't get recursive gc */
+		return YAFFS_OK;
+	}
+
+	/* This loop should pass the first time.
+	 * We'll only see looping here if the collection does not increase space.
+	 */
+
+	do {
+		max_tries++;
+
+		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+		min_erased =
+		    dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+		erased_chunks =
+		    dev->n_erased_blocks * dev->param.chunks_per_block;
+
+		/* If we need a block soon then do aggressive gc. */
+		if (dev->n_erased_blocks < min_erased)
+			aggressive = 1;
+		else {
+			if (!background
+			    && erased_chunks > (dev->n_free_chunks / 4))
+				break;
+
+			if (dev->gc_skip > 20)
+				dev->gc_skip = 20;
+			if (erased_chunks < dev->n_free_chunks / 2 ||
+			    dev->gc_skip < 1 || background)
+				aggressive = 0;
+			else {
+				dev->gc_skip--;
+				break;
+			}
+		}
+
+		dev->gc_skip = 5;
+
+		/* If we don't already have a block being gc'd then see if we should start another */
+
+		if (dev->gc_block < 1 && !aggressive) {
+			dev->gc_block = yaffs2_find_refresh_block(dev);
+			dev->gc_chunk = 0;
+			dev->n_clean_ups = 0;
+		}
+		if (dev->gc_block < 1) {
+			dev->gc_block =
+			    yaffs_find_gc_block(dev, aggressive, background);
+			dev->gc_chunk = 0;
+			dev->n_clean_ups = 0;
+		}
+
+		if (dev->gc_block > 0) {
+			dev->all_gcs++;
+			if (!aggressive)
+				dev->passive_gc_count++;
+
+			yaffs_trace(YAFFS_TRACE_GC,
+				"yaffs: GC n_erased_blocks %d aggressive %d",
+				dev->n_erased_blocks, aggressive);
+
+			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+		}
+
+		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks)
+		    && dev->gc_block > 0) {
+			yaffs_trace(YAFFS_TRACE_GC,
+				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+				dev->n_erased_blocks, max_tries,
+				dev->gc_block);
+		}
+	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+		 (dev->gc_block > 0) && (max_tries < 2));
+
+	return aggressive ? gc_ok : YAFFS_OK;
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+	int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+	yaffs_check_gc(dev, 1);
+	return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
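+/* yaffs_rd_data_obj() reads one chunk of file data into buffer, returning
+ * zeroed data if the chunk is a hole.
+ */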
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+	int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+	if (nand_chunk >= 0)
+		return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+						buffer, NULL);
+	else {
+		yaffs_trace(YAFFS_TRACE_NANDACCESS,
+			"Chunk %d not found zero instead",
+			nand_chunk);
+		/* get sane (zero) data if you read a hole */
+		memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+		return 0;
+	}
+
+}
+
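+/* yaffs_chunk_del() removes a chunk from the management structures and, for
+ * YAFFS1, writes a deletion marker to NAND. If this leaves the block with no
+ * pages in use, the block is passed to yaffs_block_became_dirty().
+ */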
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+		     int lyn)
+{
+	int block;
+	int page;
+	struct yaffs_ext_tags tags;
+	struct yaffs_block_info *bi;
+
+	if (chunk_id <= 0)
+		return;
+
+	dev->n_deletions++;
+	block = chunk_id / dev->param.chunks_per_block;
+	page = chunk_id % dev->param.chunks_per_block;
+
+	if (!yaffs_check_chunk_bit(dev, block, page))
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Deleting invalid chunk %d", chunk_id);
+
+	bi = yaffs_get_block_info(dev, block);
+
+	yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+	yaffs_trace(YAFFS_TRACE_DELETION,
+		"line %d delete of chunk %d",
+		lyn, chunk_id);
+
+	if (!dev->param.is_yaffs2 && mark_flash &&
+	    bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+		yaffs_init_tags(&tags);
+
+		tags.is_deleted = 1;
+
+		yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+		yaffs_handle_chunk_update(dev, chunk_id, &tags);
+	} else {
+		dev->n_unmarked_deletions++;
+	}
+
+	/* Pull out of the management area.
+	 * If the whole block became dirty, this will kick off an erasure.
+	 */
+	if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+	    bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+	    bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+	    bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+		dev->n_free_chunks++;
+
+		yaffs_clear_chunk_bit(dev, block, page);
+
+		bi->pages_in_use--;
+
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+		    bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+			yaffs_block_became_dirty(dev, block);
+		}
+
+	}
+
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+			     const u8 * buffer, int n_bytes, int use_reserve)
+{
+	/* Find the old chunk. We need to do this to get the serial number.
+	 * Write the new one and patch it into the tree.
+	 * Invalidate the old tags.
+	 */
+
+	int prev_chunk_id;
+	struct yaffs_ext_tags prev_tags;
+
+	int new_chunk_id;
+	struct yaffs_ext_tags new_tags;
+
+	struct yaffs_dev *dev = in->my_dev;
+
+	yaffs_check_gc(dev, 0);
+
+	/* Get the previous chunk at this location in the file if it exists.
+	 * If it does not exist then put a zero into the tree. This creates
+	 * the tnode now, rather than later when it is harder to clean up.
+	 */
+	prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+	if (prev_chunk_id < 1 &&
+	    !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+		return 0;
+
+	/* Set up new tags */
+	yaffs_init_tags(&new_tags);
+
+	new_tags.chunk_id = inode_chunk;
+	new_tags.obj_id = in->obj_id;
+	new_tags.serial_number =
+	    (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+	new_tags.n_bytes = n_bytes;
+
+	if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+		  "Writing %d bytes to chunk!!!!!!!!!",
+		   n_bytes);
+		YBUG();
+	}
+
+	new_chunk_id =
+	    yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+	if (new_chunk_id > 0) {
+		yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+		if (prev_chunk_id > 0)
+			yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+		yaffs_verify_file_sane(in);
+	}
+	return new_chunk_id;
+
+}
+
+
+
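+/* yaffs_do_xattrib_mod() packages an xattrib set or delete request and
+ * applies it by rewriting the object header.
+ */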
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+				const YCHAR * name, const void *value, int size,
+				int flags)
+{
+	struct yaffs_xattr_mod xmod;
+
+	int result;
+
+	xmod.set = set;
+	xmod.name = name;
+	xmod.data = value;
+	xmod.size = size;
+	xmod.flags = flags;
+	xmod.result = -ENOSPC;
+
+	result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+	if (result > 0)
+		return xmod.result;
+	else
+		return -ENOSPC;
+}
+
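+/* yaffs_apply_xattrib_mod() applies a pending xattrib change to the xattrib
+ * area that follows the object header in the header chunk buffer.
+ */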
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+				   struct yaffs_xattr_mod *xmod)
+{
+	int retval = 0;
+	int x_offs = sizeof(struct yaffs_obj_hdr);
+	struct yaffs_dev *dev = obj->my_dev;
+	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+	char *x_buffer = buffer + x_offs;
+
+	if (xmod->set)
+		retval =
+		    nval_set(x_buffer, x_size, xmod->name, xmod->data,
+			     xmod->size, xmod->flags);
+	else
+		retval = nval_del(x_buffer, x_size, xmod->name);
+
+	obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+	obj->xattr_known = 1;
+
+	xmod->result = retval;
+
+	return retval;
+}
+
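+/* yaffs_do_xattrib_fetch() reads the object header chunk and either fetches
+ * one named xattrib (name != NULL) or lists them all.
+ */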
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR * name,
+				  void *value, int size)
+{
+	char *buffer = NULL;
+	int result;
+	struct yaffs_ext_tags tags;
+	struct yaffs_dev *dev = obj->my_dev;
+	int x_offs = sizeof(struct yaffs_obj_hdr);
+	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+	char *x_buffer;
+
+	int retval = 0;
+
+	if (obj->hdr_chunk < 1)
+		return -ENODATA;
+
+	/* If we know that the object has no xattribs then don't do all the
+	 * reading and parsing.
+	 */
+	if (obj->xattr_known && !obj->has_xattr) {
+		if (name)
+			return -ENODATA;
+		else
+			return 0;
+	}
+
+	buffer = (char *)yaffs_get_temp_buffer(dev, __LINE__);
+	if (!buffer)
+		return -ENOMEM;
+
+	result =
+	    yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+	if (result != YAFFS_OK)
+		retval = -ENOENT;
+	else {
+		x_buffer = buffer + x_offs;
+
+		if (!obj->xattr_known) {
+			obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+			obj->xattr_known = 1;
+		}
+
+		if (name)
+			retval = nval_get(x_buffer, x_size, name, value, size);
+		else
+			retval = nval_list(x_buffer, x_size, value, size);
+	}
+	yaffs_release_temp_buffer(dev, (u8 *) buffer, __LINE__);
+	return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+		      const void *value, int size, int flags)
+{
+	return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+	return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+		      int size)
+{
+	return yaffs_do_xattrib_fetch(obj, name, value, size);
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+	return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
+}
+
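+/* yaffs_check_obj_details_loaded() reads in the object header of a
+ * lazy-loaded object so that its name, attributes and (for symlinks) alias
+ * become available.
+ */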
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+	u8 *chunk_data;
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_dev *dev;
+	struct yaffs_ext_tags tags;
+	int result;
+	int alloc_failed = 0;
+
+	if (!in)
+		return;
+
+	dev = in->my_dev;
+
+	if (in->lazy_loaded && in->hdr_chunk > 0) {
+		in->lazy_loaded = 0;
+		chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+		result =
+		    yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, chunk_data,
+					     &tags);
+		oh = (struct yaffs_obj_hdr *)chunk_data;
+
+		in->yst_mode = oh->yst_mode;
+		yaffs_load_attribs(in, oh);
+		yaffs_set_obj_name_from_oh(in, oh);
+
+		if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+			in->variant.symlink_variant.alias =
+			    yaffs_clone_str(oh->alias);
+			if (!in->variant.symlink_variant.alias)
+				alloc_failed = 1;	/* Not returned to caller */
+		}
+
+		yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+	}
+}
+
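+/* yaffs_load_name_from_oh() copies a name out of an object header. With
+ * auto_unicode, a header whose first YCHAR is non-zero holds an ASCII name
+ * which is widened; otherwise the unicode name is stored from the second
+ * YCHAR onwards.
+ */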
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR * name,
+				    const YCHAR * oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+	if (dev->param.auto_unicode) {
+		if (*oh_name) {
+			/* It is an ASCII name, do an ASCII to
+			 * unicode conversion */
+			const char *ascii_oh_name = (const char *)oh_name;
+			int n = buff_size - 1;
+			while (n > 0 && *ascii_oh_name) {
+				*name = *ascii_oh_name;
+				name++;
+				ascii_oh_name++;
+				n--;
+			}
+		} else {
+			strncpy(name, oh_name + 1, buff_size - 1);
+                }
+	} else {
+#else
+        {
+#endif
+		strncpy(name, oh_name, buff_size - 1);
+        }
+}
+
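+/* yaffs_load_oh_from_name() packs a name into an object header: a name that
+ * fits in ASCII is narrowed, while a unicode name is stored from the second
+ * YCHAR with a leading NUL marker (when auto_unicode is enabled).
+ */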
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR * oh_name,
+				    const YCHAR * name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+	int is_ascii;
+	YCHAR *w;
+
+	if (dev->param.auto_unicode) {
+
+		is_ascii = 1;
+		w = name;
+
+		/* Figure out if the name will fit in the ASCII character set */
+		while (is_ascii && *w) {
+			if ((*w) & 0xff00)
+				is_ascii = 0;
+			w++;
+		}
+
+		if (is_ascii) {
+			/* It is an ASCII name, so do a unicode to ascii conversion */
+			char *ascii_oh_name = (char *)oh_name;
+			int n = YAFFS_MAX_NAME_LENGTH - 1;
+			while (n > 0 && *name) {
+				*ascii_oh_name = *name;
+				name++;
+				ascii_oh_name++;
+				n--;
+			}
+		} else {
+			/* It is a unicode name, so save starting at the second YCHAR */
+			*oh_name = 0;
+			strncpy(oh_name + 1, name,
+				      YAFFS_MAX_NAME_LENGTH - 2);
+		}
+	} else {
+#else
+        {
+#endif
+		strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+        }
+
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name, int force,
+		    int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+	struct yaffs_block_info *bi;
+
+	struct yaffs_dev *dev = in->my_dev;
+
+	int prev_chunk_id;
+	int ret_val = 0;
+	int result = 0;
+
+	int new_chunk_id;
+	struct yaffs_ext_tags new_tags;
+	struct yaffs_ext_tags old_tags;
+	const YCHAR *alias = NULL;
+
+	u8 *buffer = NULL;
+	YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+
+	struct yaffs_obj_hdr *oh = NULL;
+
+	strcpy(old_name, _Y("silly old name"));
+
+	if (!in->fake || in == dev->root_dir ||
+	    force || xmod) {
+
+		yaffs_check_gc(dev, 0);
+		yaffs_check_obj_details_loaded(in);
+
+		buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
+		oh = (struct yaffs_obj_hdr *)buffer;
+
+		prev_chunk_id = in->hdr_chunk;
+
+		if (prev_chunk_id > 0) {
+			result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+							  buffer, &old_tags);
+
+			yaffs_verify_oh(in, oh, &old_tags, 0);
+
+			memcpy(old_name, oh->name, sizeof(oh->name));
+			memset(buffer, 0xFF, sizeof(struct yaffs_obj_hdr));
+		} else {
+			memset(buffer, 0xFF, dev->data_bytes_per_chunk);
+                }
+
+		oh->type = in->variant_type;
+		oh->yst_mode = in->yst_mode;
+		oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+		yaffs_load_attribs_oh(oh, in);
+
+		if (in->parent)
+			oh->parent_obj_id = in->parent->obj_id;
+		else
+			oh->parent_obj_id = 0;
+
+		if (name && *name) {
+			memset(oh->name, 0, sizeof(oh->name));
+			yaffs_load_oh_from_name(dev, oh->name, name);
+		} else if (prev_chunk_id > 0) {
+			memcpy(oh->name, old_name, sizeof(oh->name));
+		} else {
+			memset(oh->name, 0, sizeof(oh->name));
+                }
+
+		oh->is_shrink = is_shrink;
+
+		switch (in->variant_type) {
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+			/* Should not happen */
+			break;
+		case YAFFS_OBJECT_TYPE_FILE:
+			oh->file_size =
+			    (oh->parent_obj_id == YAFFS_OBJECTID_DELETED ||
+			     oh->parent_obj_id == YAFFS_OBJECTID_UNLINKED) ?
+			     0 : in->variant.file_variant.file_size;
+			break;
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+			oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+			break;
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+			/* Do nothing */
+			break;
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+			/* Do nothing */
+			break;
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+			alias = in->variant.symlink_variant.alias;
+			if (!alias)
+				alias = _Y("no alias");
+			strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+			oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+			break;
+		}
+
+		/* process any xattrib modifications */
+		if (xmod)
+			yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+		/* Tags */
+		yaffs_init_tags(&new_tags);
+		in->serial++;
+		new_tags.chunk_id = 0;
+		new_tags.obj_id = in->obj_id;
+		new_tags.serial_number = in->serial;
+
+		/* Add extra info for file header */
+
+		new_tags.extra_available = 1;
+		new_tags.extra_parent_id = oh->parent_obj_id;
+		new_tags.extra_length = oh->file_size;
+		new_tags.extra_is_shrink = oh->is_shrink;
+		new_tags.extra_equiv_id = oh->equiv_id;
+		new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+		new_tags.extra_obj_type = in->variant_type;
+
+		yaffs_verify_oh(in, oh, &new_tags, 1);
+
+		/* Create new chunk in NAND */
+		new_chunk_id =
+		    yaffs_write_new_chunk(dev, buffer, &new_tags,
+					  (prev_chunk_id > 0) ? 1 : 0);
+
+		if (new_chunk_id >= 0) {
+
+			in->hdr_chunk = new_chunk_id;
+
+			if (prev_chunk_id > 0) {
+				yaffs_chunk_del(dev, prev_chunk_id, 1,
+						__LINE__);
+			}
+
+			if (!yaffs_obj_cache_dirty(in))
+				in->dirty = 0;
+
+			/* If this was a shrink, then mark the block that the chunk lives on */
+			if (is_shrink) {
+				bi = yaffs_get_block_info(in->my_dev,
+							  new_chunk_id /
+							  in->my_dev->param.
+							  chunks_per_block);
+				bi->has_shrink_hdr = 1;
+			}
+
+		}
+
+		ret_val = new_chunk_id;
+
+	}
+
+	if (buffer)
+		yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+	return ret_val;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
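+/*
+ * Worked example (for illustration, assuming 2048-byte data chunks and no
+ * inband tags): a 5000-byte read starting at offset 3000 splits into
+ *  - a partial tail of file chunk 2 (bytes 3000..4095, 1096 bytes),
+ *  - all of file chunk 3 (bytes 4096..6143, 2048 bytes),
+ *  - a partial head of file chunk 4 (bytes 6144..7999, 1856 bytes).
+ * The partial pieces go through the chunk cache (or a temporary buffer if
+ * caching is disabled); the whole chunk is read straight into the caller's
+ * buffer.
+ */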
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+
+	int chunk;
+	u32 start;
+	int n_copy;
+	int n = n_bytes;
+	int n_done = 0;
+	struct yaffs_cache *cache;
+
+	struct yaffs_dev *dev;
+
+	dev = in->my_dev;
+
+	while (n > 0) {
+		/* chunk = offset / dev->data_bytes_per_chunk + 1; */
+		/* start = offset % dev->data_bytes_per_chunk; */
+		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+		chunk++;
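+		/*
+		 * Example (assuming 2048-byte data chunks): offset 5000
+		 * gives chunk 2 and start 904 from yaffs_addr_to_chunk();
+		 * the increment makes it file chunk 3, since data chunks
+		 * in a file are numbered from 1 (chunk 0 is the header).
+		 */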
+
+		/* OK now check for the curveball where the start and end are in
+		 * the same chunk.
+		 */
+		if ((start + n) < dev->data_bytes_per_chunk)
+			n_copy = n;
+		else
+			n_copy = dev->data_bytes_per_chunk - start;
+
+		cache = yaffs_find_chunk_cache(in, chunk);
+
+		/* If the chunk is already in the cache, or it is less than a
+		 * whole chunk, or we're using inband tags, then use the cache
+		 * (if caching is enabled); otherwise bypass the cache.
+		 */
+		if (cache || n_copy != dev->data_bytes_per_chunk
+		    || dev->param.inband_tags) {
+			if (dev->param.n_caches > 0) {
+
+				/* If we can't find the data in the cache, then load it up. */
+
+				if (!cache) {
+					cache =
+					    yaffs_grab_chunk_cache(in->my_dev);
+					cache->object = in;
+					cache->chunk_id = chunk;
+					cache->dirty = 0;
+					cache->locked = 0;
+					yaffs_rd_data_obj(in, chunk,
+							  cache->data);
+					cache->n_bytes = 0;
+				}
+
+				yaffs_use_cache(dev, cache, 0);
+
+				cache->locked = 1;
+
+				memcpy(buffer, &cache->data[start], n_copy);
+
+				cache->locked = 0;
+			} else {
+				/* Read into the local buffer then copy.. */
+
+				u8 *local_buffer =
+				    yaffs_get_temp_buffer(dev, __LINE__);
+				yaffs_rd_data_obj(in, chunk, local_buffer);
+
+				memcpy(buffer, &local_buffer[start], n_copy);
+
+				yaffs_release_temp_buffer(dev, local_buffer,
+							  __LINE__);
+			}
+
+		} else {
+
+			/* A full chunk. Read directly into the supplied buffer. */
+			yaffs_rd_data_obj(in, chunk, buffer);
+
+		}
+
+		n -= n_copy;
+		offset += n_copy;
+		buffer += n_copy;
+		n_done += n_copy;
+
+	}
+
+	return n_done;
+}
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+		     int n_bytes, int write_through)
+{
+
+	int chunk;
+	u32 start;
+	int n_copy;
+	int n = n_bytes;
+	int n_done = 0;
+	int n_writeback;
+	int start_write = offset;
+	int chunk_written = 0;
+	u32 n_bytes_read;
+	u32 chunk_start;
+
+	struct yaffs_dev *dev;
+
+	dev = in->my_dev;
+
+	while (n > 0 && chunk_written >= 0) {
+		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+		if (chunk * dev->data_bytes_per_chunk + start != offset ||
+		    start >= dev->data_bytes_per_chunk) {
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"AddrToChunk of offset %d gives chunk %d start %d",
+				(int)offset, chunk, start);
+		}
+		chunk++;	/* File chunks are numbered from 1; chunk 0 is the object header */
+
+		/* OK now check for the curveball where the start and end are in
+		 * the same chunk.
+		 */
+
+		if ((start + n) < dev->data_bytes_per_chunk) {
+			n_copy = n;
+
+			/* Now folks, to calculate how many bytes to write back...
+			 * If we're overwriting and not writing to the end of the
+			 * file then we need to write back as much as was there
+			 * before.
+			 */
+
+			chunk_start = ((chunk - 1) * dev->data_bytes_per_chunk);
+
+			if (chunk_start > in->variant.file_variant.file_size)
+				n_bytes_read = 0;	/* Past end of file */
+			else
+				n_bytes_read =
+				    in->variant.file_variant.file_size -
+				    chunk_start;
+
+			if (n_bytes_read > dev->data_bytes_per_chunk)
+				n_bytes_read = dev->data_bytes_per_chunk;
+
+			n_writeback =
+			    (n_bytes_read >
+			     (start + n)) ? n_bytes_read : (start + n);
+
+			if (n_writeback < 0
+			    || n_writeback > dev->data_bytes_per_chunk)
+				YBUG();
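+			/*
+			 * Worked example (assuming 2048-byte chunks): writing
+			 * 100 bytes at start 500 into a chunk that already
+			 * holds 800 bytes gives n_copy = 100 and
+			 * n_writeback = 800, i.e. we write back the larger of
+			 * what was there (n_bytes_read) and what we now cover
+			 * (start + n).
+			 */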
+
+		} else {
+			n_copy = dev->data_bytes_per_chunk - start;
+			n_writeback = dev->data_bytes_per_chunk;
+		}
+
+		if (n_copy != dev->data_bytes_per_chunk
+		    || dev->param.inband_tags) {
+			/* An incomplete start or end chunk (or maybe both start and end chunk),
+			 * or we're using inband tags, so we want to use the cache buffers.
+			 */
+			if (dev->param.n_caches > 0) {
+				struct yaffs_cache *cache;
+				/* If we can't find the data in the cache, then load the cache */
+				cache = yaffs_find_chunk_cache(in, chunk);
+
+				if (!cache
+				    && yaffs_check_alloc_available(dev, 1)) {
+					cache = yaffs_grab_chunk_cache(dev);
+					cache->object = in;
+					cache->chunk_id = chunk;
+					cache->dirty = 0;
+					cache->locked = 0;
+					yaffs_rd_data_obj(in, chunk,
+							  cache->data);
+				} else if (cache &&
+					   !cache->dirty &&
+					   !yaffs_check_alloc_available(dev,
+									1)) {
+					/* Drop the cache if it was a read cache item and
+					 * no space check has been made for it.
+					 */
+					cache = NULL;
+				}
+
+				if (cache) {
+					yaffs_use_cache(dev, cache, 1);
+					cache->locked = 1;
+
+					memcpy(&cache->data[start], buffer,
+					       n_copy);
+
+					cache->locked = 0;
+					cache->n_bytes = n_writeback;
+
+					if (write_through) {
+						chunk_written =
+						    yaffs_wr_data_obj
+						    (cache->object,
+						     cache->chunk_id,
+						     cache->data,
+						     cache->n_bytes, 1);
+						cache->dirty = 0;
+					}
+
+				} else {
+					chunk_written = -1;	/* fail the write */
+				}
+			} else {
+				/* An incomplete start or end chunk (or maybe both).
+				 * Read the chunk into the local buffer, copy in
+				 * the new data, then write it back.
+				 */
+
+				u8 *local_buffer =
+				    yaffs_get_temp_buffer(dev, __LINE__);
+
+				yaffs_rd_data_obj(in, chunk, local_buffer);
+
+				memcpy(&local_buffer[start], buffer, n_copy);
+
+				chunk_written =
+				    yaffs_wr_data_obj(in, chunk,
+						      local_buffer,
+						      n_writeback, 0);
+
+				yaffs_release_temp_buffer(dev, local_buffer,
+							  __LINE__);
+
+			}
+
+		} else {
+			/* A full chunk. Write directly from the supplied buffer. */
+
+			chunk_written =
+			    yaffs_wr_data_obj(in, chunk, buffer,
+					      dev->data_bytes_per_chunk, 0);
+
+			/* Since we've overwritten the cached data, we better invalidate it. */
+			yaffs_invalidate_chunk_cache(in, chunk);
+		}
+
+		if (chunk_written >= 0) {
+			n -= n_copy;
+			offset += n_copy;
+			buffer += n_copy;
+			n_done += n_copy;
+		}
+
+	}
+
+	/* Update file object */
+
+	if ((start_write + n_done) > in->variant.file_variant.file_size)
+		in->variant.file_variant.file_size = (start_write + n_done);
+
+	in->dirty = 1;
+
+	return n_done;
+}
+
+int yaffs_wr_file(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+		  int n_bytes, int write_through)
+{
+	yaffs2_handle_hole(in, offset);
+	return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+static void yaffs_prune_chunks(struct yaffs_obj *in, int new_size)
+{
+
+	struct yaffs_dev *dev = in->my_dev;
+	int old_size = in->variant.file_variant.file_size;
+
+	int last_del = 1 + (old_size - 1) / dev->data_bytes_per_chunk;
+
+	int start_del = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
+	    dev->data_bytes_per_chunk;
+	int i;
+	int chunk_id;
+
+	/* Delete backwards so that we don't end up with holes if
+	 * power is lost part-way through the operation.
+	 */
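+	/*
+	 * Illustration (assuming 2048-byte chunks): shrinking a 10000-byte
+	 * file to 3000 bytes gives last_del = 1 + 9999/2048 = 5 and
+	 * start_del = 1 + (3000 + 2047)/2048 = 3, so file chunks 5, 4 and 3
+	 * are deleted; chunks 1 and 2 still hold the remaining data.
+	 */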
+	for (i = last_del; i >= start_del; i--) {
+		/* NB this could be optimised somewhat,
+		 * eg. could retrieve the tags and write them without
+		 * using yaffs_chunk_del
+		 */
+
+		chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+		if (chunk_id > 0) {
+			if (chunk_id <
+			    (dev->internal_start_block *
+			     dev->param.chunks_per_block)
+			    || chunk_id >=
+			    ((dev->internal_end_block +
+			      1) * dev->param.chunks_per_block)) {
+				yaffs_trace(YAFFS_TRACE_ALWAYS,
+					"Found daft chunk_id %d for %d",
+					chunk_id, i);
+			} else {
+				in->n_data_chunks--;
+				yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+			}
+		}
+	}
+
+}
+
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+	int new_full;
+	u32 new_partial;
+	struct yaffs_dev *dev = obj->my_dev;
+
+	yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+	yaffs_prune_chunks(obj, new_size);
+
+	if (new_partial != 0) {
+		int last_chunk = 1 + new_full;
+		u8 *local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+		/* Rewrite the last chunk with its new size and zero pad */
+		yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+		memset(local_buffer + new_partial, 0,
+		       dev->data_bytes_per_chunk - new_partial);
+
+		yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+				  new_partial, 1);
+
+		yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+	}
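+	/*
+	 * For example (assuming 2048-byte chunks), truncating to 5000 bytes
+	 * gives new_full = 2 and new_partial = 904: chunks 4 and above were
+	 * pruned by yaffs_prune_chunks() above, and file chunk 3 is rewritten
+	 * with its first 904 bytes kept and the rest zero-padded.
+	 */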
+
+	obj->variant.file_variant.file_size = new_size;
+
+	yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+	struct yaffs_dev *dev = in->my_dev;
+	int old_size = in->variant.file_variant.file_size;
+
+	yaffs_flush_file_cache(in);
+	yaffs_invalidate_whole_cache(in);
+
+	yaffs_check_gc(dev, 0);
+
+	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+		return YAFFS_FAIL;
+
+	if (new_size == old_size)
+		return YAFFS_OK;
+
+	if (new_size > old_size) {
+		yaffs2_handle_hole(in, new_size);
+		in->variant.file_variant.file_size = new_size;
+	} else {
+		/* new_size < old_size */
+		yaffs_resize_file_down(in, new_size);
+	}
+
+	/* Write a new object header to reflect the resize and to show
+	 * that we've shrunk the file, if need be.
+	 * Do this only if the file is not in the deleted directories
+	 * and is not shadowed.
+	 */
+	if (in->parent &&
+	    !in->is_shadowed &&
+	    in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+	    in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+		yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+	return YAFFS_OK;
+}
+
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+	int ret_val;
+	if (in->dirty) {
+		yaffs_flush_file_cache(in);
+		if (data_sync)	/* Only sync data */
+			ret_val = YAFFS_OK;
+		else {
+			if (update_time)
+				yaffs_load_current_time(in, 0, 0);
+
+			ret_val = (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >=
+				   0) ? YAFFS_OK : YAFFS_FAIL;
+		}
+	} else {
+		ret_val = YAFFS_OK;
+	}
+
+	return ret_val;
+
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+
+	int ret_val;
+	int del_now = 0;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!in->my_inode)
+		del_now = 1;
+
+	if (del_now) {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->del_dir,
+					  _Y("deleted"), 0, 0);
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"yaffs: immediate deletion of file %d",
+			in->obj_id);
+		in->deleted = 1;
+		in->my_dev->n_deleted_files++;
+		if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+			yaffs_resize_file(in, 0);
+		yaffs_soft_del_file(in);
+	} else {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+					  _Y("unlinked"), 0, 0);
+	}
+
+	return ret_val;
+}
+
+int yaffs_del_file(struct yaffs_obj *in)
+{
+	int ret_val = YAFFS_OK;
+	int deleted;		/* Need to cache value on stack if in is freed */
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+		yaffs_resize_file(in, 0);
+
+	if (in->n_data_chunks > 0) {
+		/* Use soft deletion if there is data in the file.
+		 * That won't be the case if it has been resized to zero.
+		 */
+		if (!in->unlinked)
+			ret_val = yaffs_unlink_file_if_needed(in);
+
+		deleted = in->deleted;
+
+		if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+			in->deleted = 1;
+			deleted = 1;
+			in->my_dev->n_deleted_files++;
+			yaffs_soft_del_file(in);
+		}
+		return deleted ? YAFFS_OK : YAFFS_FAIL;
+	} else {
+		/* The file has no data chunks so we toss it immediately */
+		yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+		in->variant.file_variant.top = NULL;
+		yaffs_generic_obj_del(in);
+
+		return YAFFS_OK;
+	}
+}
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+	return (obj &&
+	        obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+	        !(list_empty(&obj->variant.dir_variant.children));
+}
+
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+	/* First check that the directory is empty. */
+	if (yaffs_is_non_empty_dir(obj))
+		return YAFFS_FAIL;
+
+	return yaffs_generic_obj_del(obj);
+}
+
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+	if (in->variant.symlink_variant.alias)
+		kfree(in->variant.symlink_variant.alias);
+	in->variant.symlink_variant.alias = NULL;
+
+	return yaffs_generic_obj_del(in);
+}
+
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+	/* Remove this hardlink from the list associated with the equivalent
+	 * object.
+	 */
+	list_del_init(&in->hard_links);
+	return yaffs_generic_obj_del(in);
+}
+
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+	int ret_val = -1;
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		ret_val = yaffs_del_file(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		if (!list_empty(&obj->variant.dir_variant.dirty)) {
+			yaffs_trace(YAFFS_TRACE_BACKGROUND,
+				"Remove object %d from dirty directories",
+				obj->obj_id);
+			list_del_init(&obj->variant.dir_variant.dirty);
+		}
+		return yaffs_del_dir(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		ret_val = yaffs_del_symlink(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		ret_val = yaffs_del_link(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		ret_val = yaffs_generic_obj_del(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		ret_val = 0;
+		break;		/* should not happen. */
+	}
+
+	return ret_val;
+}
+
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+
+	int del_now = 0;
+
+	if (!obj->my_inode)
+		del_now = 1;
+
+	if (obj)
+		yaffs_update_parent(obj->parent);
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+		return yaffs_del_link(obj);
+	} else if (!list_empty(&obj->hard_links)) {
+		/* Curve ball: We're unlinking an object that has a hardlink.
+		 *
+		 * This problem arises because we are not strictly following
+		 * the Linux link/inode model.
+		 *
+		 * We can't really delete the object.
+		 * Instead, we do the following:
+		 * - Select a hardlink.
+		 * - Unhook it from the hard links
+		 * - Move it from its parent directory (so that the rename can work)
+		 * - Rename the object to the hardlink's name.
+		 * - Delete the hardlink
+		 */
+
+		struct yaffs_obj *hl;
+		struct yaffs_obj *parent;
+		int ret_val;
+		YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+		hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+				hard_links);
+
+		yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+		parent = hl->parent;
+
+		list_del_init(&hl->hard_links);
+
+		yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+		ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+		if (ret_val == YAFFS_OK)
+			ret_val = yaffs_generic_obj_del(hl);
+
+		return ret_val;
+
+	} else if (del_now) {
+		switch (obj->variant_type) {
+		case YAFFS_OBJECT_TYPE_FILE:
+			return yaffs_del_file(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+			list_del_init(&obj->variant.dir_variant.dirty);
+			return yaffs_del_dir(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+			return yaffs_del_symlink(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+			return yaffs_generic_obj_del(obj);
+			break;
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+		default:
+			return YAFFS_FAIL;
+		}
+	} else if (yaffs_is_non_empty_dir(obj)) {
+		return YAFFS_FAIL;
+	} else {
+		return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+					     _Y("unlinked"), 0, 0);
+        }
+}
+
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+
+	if (obj && obj->unlink_allowed)
+		return yaffs_unlink_worker(obj);
+
+	return YAFFS_FAIL;
+
+}
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name)
+{
+	struct yaffs_obj *obj;
+
+	obj = yaffs_find_by_name(dir, name);
+	return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+		     struct yaffs_obj *new_dir, const YCHAR * new_name)
+{
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_obj *existing_target = NULL;
+	int force = 0;
+	int result;
+	struct yaffs_dev *dev;
+
+	if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		YBUG();
+	if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		YBUG();
+
+	dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+	/* Special case for case insensitive systems.
+	 * While look-up is case insensitive, the name isn't.
+	 * Therefore we might want to change x.txt to X.txt
+	 */
+	if (old_dir == new_dir &&
+	    old_name && new_name &&
+	    strcmp(old_name, new_name) == 0)
+		force = 1;
+#endif
+
+	if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+	    YAFFS_MAX_NAME_LENGTH)
+		/* ENAMETOOLONG */
+		return YAFFS_FAIL;
+
+	if (old_name) {
+		obj = yaffs_find_by_name(old_dir, old_name);
+	} else {
+		obj = old_dir;
+		old_dir = obj->parent;
+	}
+
+	if (obj && obj->rename_allowed) {
+
+		/* Now do the handling for an existing target, if there is one */
+
+		existing_target = yaffs_find_by_name(new_dir, new_name);
+		if (yaffs_is_non_empty_dir(existing_target)) {
+			return YAFFS_FAIL;	/* ENOTEMPTY */
+		} else if (existing_target && existing_target != obj) {
+			/* Nuke the target first, using shadowing,
+			 * but only if it isn't the same object.
+			 *
+			 * Note we must disable gc otherwise it can mess up the shadowing.
+			 *
+			 */
+			dev->gc_disable = 1;
+			yaffs_change_obj_name(obj, new_dir, new_name, force,
+					      existing_target->obj_id);
+			existing_target->is_shadowed = 1;
+			yaffs_unlink_obj(existing_target);
+			dev->gc_disable = 0;
+		}
+
+		result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+		yaffs_update_parent(old_dir);
+		if (new_dir != old_dir)
+			yaffs_update_parent(new_dir);
+
+		return result;
+	}
+	return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+			       int backward_scanning)
+{
+	struct yaffs_obj *obj;
+
+	if (!backward_scanning) {
+		/* Handle YAFFS1 forward scanning case
+		 * For YAFFS1 we always do the deletion
+		 */
+
+	} else {
+		/* Handle YAFFS2 case (backward scanning)
+		 * If the shadowed object exists then ignore.
+		 */
+		obj = yaffs_find_by_number(dev, obj_id);
+		if (obj)
+			return;
+	}
+
+	/* Let's create it (if it does not exist), assuming it is a file so
+	 * that it can do shrinking etc.
+	 * We put it in the unlinked dir to be cleaned up after the scan.
+	 */
+	obj =
+	    yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+	if (!obj)
+		return;
+	obj->is_shadowed = 1;
+	yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+	obj->variant.file_variant.shrink_size = 0;
+	obj->valid = 1;		/* So that we don't read any other info for this file */
+
+}
+
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list)
+{
+	struct yaffs_obj *hl;
+	struct yaffs_obj *in;
+
+	while (hard_list) {
+		hl = hard_list;
+		hard_list = (struct yaffs_obj *)(hard_list->hard_links.next);
+
+		in = yaffs_find_by_number(dev,
+					  hl->variant.hardlink_variant.equiv_id);
+
+		if (in) {
+			/* Add the hardlink pointers */
+			hl->variant.hardlink_variant.equiv_obj = in;
+			list_add(&hl->hard_links, &in->hard_links);
+		} else {
+			/* TODO: Need to report/handle this better.
+			 * Got a problem... hardlink to a non-existent object.
+			 */
+			hl->variant.hardlink_variant.equiv_obj = NULL;
+			INIT_LIST_HEAD(&hl->hard_links);
+
+		}
+	}
+}
+
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+	/*
+	 *  Sort out state of unlinked and deleted objects after scanning.
+	 */
+	struct list_head *i;
+	struct list_head *n;
+	struct yaffs_obj *l;
+
+	if (dev->read_only)
+		return;
+
+	/* Soft delete all the unlinked files */
+	list_for_each_safe(i, n,
+			   &dev->unlinked_dir->variant.dir_variant.children) {
+		if (i) {
+			l = list_entry(i, struct yaffs_obj, siblings);
+			yaffs_del_obj(l);
+		}
+	}
+
+	list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+		if (i) {
+			l = list_entry(i, struct yaffs_obj, siblings);
+			yaffs_del_obj(l);
+		}
+	}
+
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under deleted, unlinked
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ *  This code assumes that we don't ever change the current relationships between
+ *  directories:
+ *   root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ *   lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been deleted
+ * leaving the object "hanging" without being rooted in the directory tree.
+ */
+
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+	return (obj == dev->del_dir ||
+		obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_obj *parent;
+	int i;
+	struct list_head *lh;
+	struct list_head *n;
+	int depth_limit;
+	int hanging;
+
+	if (dev->read_only)
+		return;
+
+	/* Iterate through the objects in each hash entry,
+	 * looking at each object.
+	 * Make sure it is rooted.
+	 */
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+			if (lh) {
+				obj =
+				    list_entry(lh, struct yaffs_obj, hash_link);
+				parent = obj->parent;
+
+				if (yaffs_has_null_parent(dev, obj)) {
+					/* These directories are not hanging */
+					hanging = 0;
+				} else if (!parent
+					   || parent->variant_type !=
+					   YAFFS_OBJECT_TYPE_DIRECTORY) {
+					hanging = 1;
+				} else if (yaffs_has_null_parent(dev, parent)) {
+					hanging = 0;
+				} else {
+					/*
+					 * Need to follow the parent chain to see if it is hanging.
+					 */
+					hanging = 0;
+					depth_limit = 100;
+
+					while (parent != dev->root_dir &&
+					       parent->parent &&
+					       parent->parent->variant_type ==
+					       YAFFS_OBJECT_TYPE_DIRECTORY
+					       && depth_limit > 0) {
+						parent = parent->parent;
+						depth_limit--;
+					}
+					if (parent != dev->root_dir)
+						hanging = 1;
+				}
+				if (hanging) {
+					yaffs_trace(YAFFS_TRACE_SCAN,
+						"Hanging object %d moved to lost and found",
+						obj->obj_id);
+					yaffs_add_obj_to_dir(dev->lost_n_found,
+							     obj);
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+	struct yaffs_obj *obj;
+	struct list_head *lh;
+	struct list_head *n;
+
+	if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		YBUG();
+
+	list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+		if (lh) {
+			obj = list_entry(lh, struct yaffs_obj, siblings);
+			if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+				yaffs_del_dir_contents(obj);
+
+			yaffs_trace(YAFFS_TRACE_SCAN,
+				"Deleting lost_found object %d",
+				obj->obj_id);
+
+			/* Need to use UnlinkObject since Delete would not handle
+			 * hardlinked objects correctly.
+			 */
+			yaffs_unlink_obj(obj);
+		}
+	}
+
+}
+
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+	yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+				     const YCHAR * name)
+{
+	int sum;
+
+	struct list_head *i;
+	YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+	struct yaffs_obj *l;
+
+	if (!name)
+		return NULL;
+
+	if (!directory) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_find_by_name: null pointer directory"
+			);
+		YBUG();
+		return NULL;
+	}
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_find_by_name: non-directory"
+			);
+		YBUG();
+	}
+
+	sum = yaffs_calc_name_sum(name);
+
+	list_for_each(i, &directory->variant.dir_variant.children) {
+		if (i) {
+			l = list_entry(i, struct yaffs_obj, siblings);
+
+			if (l->parent != directory)
+				YBUG();
+
+			yaffs_check_obj_details_loaded(l);
+
+			/* Special case for lost-n-found */
+			if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+				if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+					return l;
+			} else if (l->sum == sum
+				   || l->hdr_chunk <= 0) {
+				/* Possible match: same name checksum, or an
+				 * object with no header chunk (named objNNN).
+				 * Do a real name comparison.
+				 */
+				yaffs_get_obj_name(l, buffer,
+						   YAFFS_MAX_NAME_LENGTH + 1);
+				if (strncmp
+				    (name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+					return l;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+	if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+		/* We want the object id of the equivalent object, not this one */
+		obj = obj->variant.hardlink_variant.equiv_obj;
+		yaffs_check_obj_details_loaded(obj);
+	}
+	return obj;
+}
+
+/*
+ *  A note or two on object names.
+ *  * If the object name is missing, we then make one up in the form objnnn
+ *
+ *  * ASCII names are stored in the object header's name field from byte zero
+ *  * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic unicode names are stored slightly differently:
+ *  - If the name can fit in the ASCII character space then it is saved as
+ *    an ascii name as per above.
+ *  - If the name needs Unicode then the name is saved in Unicode
+ *    starting at oh->name[1].
+ *
+ */
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR * name,
+				int buffer_size)
+{
+	/* Create an object name if we could not find one. */
+	if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+		YCHAR local_name[20];
+		YCHAR num_string[20];
+		YCHAR *x = &num_string[19];
+		unsigned v = obj->obj_id;
+		num_string[19] = 0;
+		while (v > 0) {
+			x--;
+			*x = '0' + (v % 10);
+			v /= 10;
+		}
+		/* make up a name */
+		strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+		strcat(local_name, x);
+		strncpy(name, local_name, buffer_size - 1);
+	}
+}
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size)
+{
+	memset(name, 0, buffer_size * sizeof(YCHAR));
+
+	yaffs_check_obj_details_loaded(obj);
+
+	if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+		strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+	}
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+	else if (obj->short_name[0]) {
+		strcpy(name, obj->short_name);
+	}
+#endif
+	else if (obj->hdr_chunk > 0) {
+		int result;
+		u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
+
+		struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+		memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+		if (obj->hdr_chunk > 0) {
+			result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+							  obj->hdr_chunk,
+							  buffer, NULL);
+		}
+		yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+					buffer_size);
+
+		yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
+	}
+
+	yaffs_fix_null_name(obj, name, buffer_size);
+
+	return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+int yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+	/* Dereference any hard linking */
+	obj = yaffs_get_equivalent_obj(obj);
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+		return obj->variant.file_variant.file_size;
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+		if (!obj->variant.symlink_variant.alias)
+			return 0;
+		return strnlen(obj->variant.symlink_variant.alias,
+				     YAFFS_MAX_ALIAS_LENGTH);
+	} else {
+		/* Only a directory should drop through to here */
+		return obj->my_dev->data_bytes_per_chunk;
+	}
+}
+
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+	int count = 0;
+	struct list_head *i;
+
+	if (!obj->unlinked)
+		count++;	/* the object itself */
+
+	list_for_each(i, &obj->hard_links)
+	    count++;		/* add the hard links; */
+
+	return count;
+}
+
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+
+	return obj->obj_id;
+}
+
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		return DT_REG;
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		return DT_DIR;
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		return DT_LNK;
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		return DT_REG;
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		if (S_ISFIFO(obj->yst_mode))
+			return DT_FIFO;
+		if (S_ISCHR(obj->yst_mode))
+			return DT_CHR;
+		if (S_ISBLK(obj->yst_mode))
+			return DT_BLK;
+		if (S_ISSOCK(obj->yst_mode))
+			return DT_SOCK;
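+		/* fall through */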
+	default:
+		return DT_REG;
+		break;
+	}
+}
+
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+		return yaffs_clone_str(obj->variant.symlink_variant.alias);
+	else
+		return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+static int yaffs_check_dev_fns(const struct yaffs_dev *dev)
+{
+
+	/* Common functions, gotta have */
+	if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
+		return 0;
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+	/* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+	if (dev->param.write_chunk_tags_fn &&
+	    dev->param.read_chunk_tags_fn &&
+	    !dev->param.write_chunk_fn &&
+	    !dev->param.read_chunk_fn &&
+	    dev->param.bad_block_fn && dev->param.query_block_fn)
+		return 1;
+#endif
+
+	/* Can use the "spare" style interface for yaffs1 */
+	if (!dev->param.is_yaffs2 &&
+	    !dev->param.write_chunk_tags_fn &&
+	    !dev->param.read_chunk_tags_fn &&
+	    dev->param.write_chunk_fn &&
+	    dev->param.read_chunk_fn &&
+	    !dev->param.bad_block_fn && !dev->param.query_block_fn)
+		return 1;
+
+	return 0;		/* bad */
+}
+
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+	/* Initialise the unlinked, deleted, root and lost and found directories */
+
+	dev->lost_n_found = dev->root_dir = NULL;
+	dev->unlinked_dir = dev->del_dir = NULL;
+
+	dev->unlinked_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+
+	dev->del_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+
+	dev->root_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+				  YAFFS_ROOT_MODE | S_IFDIR);
+	dev->lost_n_found =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+				  YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+	if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+	    && dev->del_dir) {
+		yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+		return YAFFS_OK;
+	}
+
+	return YAFFS_FAIL;
+}
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+	int init_failed = 0;
+	unsigned x;
+	int bits;
+
+	yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()");
+
+	/* Check stuff that must be set */
+
+	if (!dev) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs: Need a device"
+			);
+		return YAFFS_FAIL;
+	}
+
+	dev->internal_start_block = dev->param.start_block;
+	dev->internal_end_block = dev->param.end_block;
+	dev->block_offset = 0;
+	dev->chunk_offset = 0;
+	dev->n_free_chunks = 0;
+
+	dev->gc_block = 0;
+
+	if (dev->param.start_block == 0) {
+		dev->internal_start_block = dev->param.start_block + 1;
+		dev->internal_end_block = dev->param.end_block + 1;
+		dev->block_offset = 1;
+		dev->chunk_offset = dev->param.chunks_per_block;
+	}
+
+	/* Check geometry parameters. */
+
+	if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+		dev->param.total_bytes_per_chunk < 1024) ||
+		(!dev->param.is_yaffs2 &&
+			dev->param.total_bytes_per_chunk < 512) ||
+		(dev->param.inband_tags && !dev->param.is_yaffs2) ||
+		 dev->param.chunks_per_block < 2 ||
+		 dev->param.n_reserved_blocks < 2 ||
+		dev->internal_start_block <= 0 || 
+		dev->internal_end_block <= 0 || 
+		dev->internal_end_block <= 
+		(dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+		) {
+		/* otherwise it is too small */
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+			dev->param.total_bytes_per_chunk,
+			dev->param.is_yaffs2 ? "2" : "",
+			dev->param.inband_tags);
+		return YAFFS_FAIL;
+	}
+
+	if (yaffs_init_nand(dev) != YAFFS_OK) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+		return YAFFS_FAIL;
+	}
+
+	/* Sort out space for inband tags, if required */
+	if (dev->param.inband_tags)
+		dev->data_bytes_per_chunk =
+		    dev->param.total_bytes_per_chunk -
+		    sizeof(struct yaffs_packed_tags2_tags_only);
+	else
+		dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+	/* Got the right mix of functions? */
+	if (!yaffs_check_dev_fns(dev)) {
+		/* Function missing */
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"device function(s) missing or wrong");
+
+		return YAFFS_FAIL;
+	}
+
+	if (dev->is_mounted) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+		return YAFFS_FAIL;
+	}
+
+	/* Finished with most checks. One or two more checks happen later on too. */
+
+	dev->is_mounted = 1;
+
+	/* OK now calculate a few things for the device */
+
+	/*
+	 *  Calculate all the chunk size manipulation numbers:
+	 */
+	x = dev->data_bytes_per_chunk;
+	/* We always use dev->chunk_shift and dev->chunk_div */
+	dev->chunk_shift = calc_shifts(x);
+	x >>= dev->chunk_shift;
+	dev->chunk_div = x;
+	/* We only use chunk mask if chunk_div is 1 */
+	dev->chunk_mask = (1 << dev->chunk_shift) - 1;
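+	/*
+	 * Example: with 2048 data bytes per chunk, chunk_shift = 11,
+	 * chunk_div = 1 and chunk_mask = 0x7ff, so offsets split with a
+	 * shift and a mask alone. With inband tags the chunk size is
+	 * typically no longer a power of two, chunk_div ends up greater
+	 * than 1 and the mask is not used.
+	 */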
+
+	/*
+	 * Calculate chunk_grp_bits.
+	 * We need enough bits to hold any chunk number on the device,
+	 * i.e. up to chunks_per_block * (internal_end_block + 1).
+	 */
+
+	x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+	bits = calc_shifts_ceiling(x);
+
+	/* Set up tnode width if wide tnodes are enabled. */
+	if (!dev->param.wide_tnodes_disabled) {
+		/* bits must be even so that we end up with 32-bit words */
+		if (bits & 1)
+			bits++;
+		if (bits < 16)
+			dev->tnode_width = 16;
+		else
+			dev->tnode_width = bits;
+	} else {
+		dev->tnode_width = 16;
+        }
+
+	dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+	/* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+	 * so if the bit width of the chunk range we're using is greater
+	 * than the tnode width we need to figure out chunk_grp_bits and
+	 * chunk_grp_size.
+	 */
+
+	if (bits <= dev->tnode_width)
+		dev->chunk_grp_bits = 0;
+	else
+		dev->chunk_grp_bits = bits - dev->tnode_width;
+
+	dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+	if (dev->tnode_size < sizeof(struct yaffs_tnode))
+		dev->tnode_size = sizeof(struct yaffs_tnode);
+
+	dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
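+	/*
+	 * Rough example (wide tnodes, the default): 64 chunks per block and
+	 * internal_end_block 2047 give x = 131072, so bits = 17, rounded up
+	 * to 18; tnode_width becomes 18, every chunk id fits in a level-0
+	 * tnode entry and chunk_grp_bits stays 0 (chunk_grp_size = 1).
+	 */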
+
+	if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+		/* We have a problem because the soft delete won't work if
+		 * the chunk group size > chunks per block.
+		 * This can be remedied by using larger "virtual blocks".
+		 */
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+		return YAFFS_FAIL;
+	}
+
+	/* OK, we've finished verifying the device, let's continue with initialisation */
+
+	/* More device initialisation */
+	dev->all_gcs = 0;
+	dev->passive_gc_count = 0;
+	dev->oldest_dirty_gc_count = 0;
+	dev->bg_gcs = 0;
+	dev->gc_block_finder = 0;
+	dev->buffered_block = -1;
+	dev->doing_buffered_block_rewrite = 0;
+	dev->n_deleted_files = 0;
+	dev->n_bg_deletions = 0;
+	dev->n_unlinked_files = 0;
+	dev->n_ecc_fixed = 0;
+	dev->n_ecc_unfixed = 0;
+	dev->n_tags_ecc_fixed = 0;
+	dev->n_tags_ecc_unfixed = 0;
+	dev->n_erase_failures = 0;
+	dev->n_erased_blocks = 0;
+	dev->gc_disable = 0;
+	dev->has_pending_prioritised_gc = 1;	/* Assume the worst for now, will get fixed on first GC */
+	INIT_LIST_HEAD(&dev->dirty_dirs);
+	dev->oldest_dirty_seq = 0;
+	dev->oldest_dirty_block = 0;
+
+	/* Initialise temporary buffers and caches. */
+	if (!yaffs_init_tmp_buffers(dev))
+		init_failed = 1;
+
+	dev->cache = NULL;
+	dev->gc_cleanup_list = NULL;
+
+	if (!init_failed && dev->param.n_caches > 0) {
+		int i;
+		void *buf;
+		int cache_bytes;
+
+		/* Clamp n_caches before sizing the cache allocation */
+		if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+			dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+		cache_bytes =
+		    dev->param.n_caches * sizeof(struct yaffs_cache);
+
+		dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+		buf = (u8 *) dev->cache;
+
+		if (dev->cache)
+			memset(dev->cache, 0, cache_bytes);
+
+		for (i = 0; i < dev->param.n_caches && buf; i++) {
+			dev->cache[i].object = NULL;
+			dev->cache[i].last_use = 0;
+			dev->cache[i].dirty = 0;
+			dev->cache[i].data = buf =
+			    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+		}
+		if (!buf)
+			init_failed = 1;
+
+		dev->cache_last_use = 0;
+	}
+
+	dev->cache_hits = 0;
+
+	if (!init_failed) {
+		dev->gc_cleanup_list =
+		    kmalloc(dev->param.chunks_per_block * sizeof(u32),
+					GFP_NOFS);
+		if (!dev->gc_cleanup_list)
+			init_failed = 1;
+	}
+
+	if (dev->param.is_yaffs2)
+		dev->param.use_header_file_size = 1;
+
+	if (!init_failed && !yaffs_init_blocks(dev))
+		init_failed = 1;
+
+	yaffs_init_tnodes_and_objs(dev);
+
+	if (!init_failed && !yaffs_create_initial_dir(dev))
+		init_failed = 1;
+
+	if (!init_failed) {
+		/* Now scan the flash. */
+		if (dev->param.is_yaffs2) {
+			if (yaffs2_checkpt_restore(dev)) {
+				yaffs_check_obj_details_loaded(dev->root_dir);
+				yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+					"yaffs: restored from checkpoint"
+					);
+			} else {
+
+				/* Clean up the mess caused by an aborted checkpoint load
+				 * and scan backwards.
+				 */
+				yaffs_deinit_blocks(dev);
+
+				yaffs_deinit_tnodes_and_objs(dev);
+
+				dev->n_erased_blocks = 0;
+				dev->n_free_chunks = 0;
+				dev->alloc_block = -1;
+				dev->alloc_page = -1;
+				dev->n_deleted_files = 0;
+				dev->n_unlinked_files = 0;
+				dev->n_bg_deletions = 0;
+
+				if (!init_failed && !yaffs_init_blocks(dev))
+					init_failed = 1;
+
+				yaffs_init_tnodes_and_objs(dev);
+
+				if (!init_failed
+				    && !yaffs_create_initial_dir(dev))
+					init_failed = 1;
+
+				if (!init_failed && !yaffs2_scan_backwards(dev))
+					init_failed = 1;
+			}
+		} else if (!yaffs1_scan(dev)) {
+			init_failed = 1;
+                }
+
+		yaffs_strip_deleted_objs(dev);
+		yaffs_fix_hanging_objs(dev);
+		if (dev->param.empty_lost_n_found)
+			yaffs_empty_l_n_f(dev);
+	}
+
+	if (init_failed) {
+		/* Clean up the mess */
+		yaffs_trace(YAFFS_TRACE_TRACING,
+		  "yaffs: yaffs_guts_initialise() aborted.");
+
+		yaffs_deinitialise(dev);
+		return YAFFS_FAIL;
+	}
+
+	/* Zero out stats */
+	dev->n_page_reads = 0;
+	dev->n_page_writes = 0;
+	dev->n_erasures = 0;
+	dev->n_gc_copies = 0;
+	dev->n_retired_writes = 0;
+
+	dev->n_retired_blocks = 0;
+
+	yaffs_verify_free_chunks(dev);
+	yaffs_verify_blocks(dev);
+
+	/* Clean up any aborted checkpoint data */
+	if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+		yaffs2_checkpt_invalidate(dev);
+
+	yaffs_trace(YAFFS_TRACE_TRACING,
+	  "yaffs: yaffs_guts_initialise() done.");
+	return YAFFS_OK;
+
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+	if (dev->is_mounted) {
+		int i;
+
+		yaffs_deinit_blocks(dev);
+		yaffs_deinit_tnodes_and_objs(dev);
+		if (dev->param.n_caches > 0 && dev->cache) {
+
+			for (i = 0; i < dev->param.n_caches; i++) {
+				if (dev->cache[i].data)
+					kfree(dev->cache[i].data);
+				dev->cache[i].data = NULL;
+			}
+
+			kfree(dev->cache);
+			dev->cache = NULL;
+		}
+
+		kfree(dev->gc_cleanup_list);
+
+		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+			kfree(dev->temp_buffer[i].buffer);
+
+		dev->is_mounted = 0;
+
+		if (dev->param.deinitialise_flash_fn)
+			dev->param.deinitialise_flash_fn(dev);
+	}
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+	int n_free = 0;
+	int b;
+
+	struct yaffs_block_info *blk;
+
+	blk = dev->block_info;
+	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+		switch (blk->block_state) {
+		case YAFFS_BLOCK_STATE_EMPTY:
+		case YAFFS_BLOCK_STATE_ALLOCATING:
+		case YAFFS_BLOCK_STATE_COLLECTING:
+		case YAFFS_BLOCK_STATE_FULL:
+			n_free +=
+			    (dev->param.chunks_per_block - blk->pages_in_use +
+			     blk->soft_del_pages);
+			break;
+		default:
+			break;
+		}
+		blk++;
+	}
+
+	return n_free;
+}
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+	/* This is what we report to the outside world */
+
+	int n_free;
+	int n_dirty_caches;
+	int blocks_for_checkpt;
+	int i;
+
+	n_free = dev->n_free_chunks;
+	n_free += dev->n_deleted_files;
+
+	/* Now count the number of dirty chunks in the cache and subtract those */
+
+	for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
+		if (dev->cache[i].dirty)
+			n_dirty_caches++;
+	}
+
+	n_free -= n_dirty_caches;
+
+	n_free -=
+	    ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+	/* Now we figure out how much to reserve for the checkpoint and report that... */
+	blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+	n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
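+	/*
+	 * Example (illustrative figures): with 64 chunks per block,
+	 * 5 reserved blocks and 2 checkpoint blocks, (5 + 1) * 64 = 384
+	 * plus 2 * 64 = 128 chunks are held back from the free count
+	 * reported to the outside world.
+	 */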
+
+	if (n_free < 0)
+		n_free = 0;
+
+	return n_free;
+
+}
diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 0000000..307eba2
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,915 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK	1
+#define YAFFS_FAIL  0
+
+/* Give us a  Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xFF
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC			0x5941FF53
+
+#define YAFFS_NTNODES_LEVEL0	  	16
+#define YAFFS_TNODES_LEVEL0_BITS	4
+#define YAFFS_TNODES_LEVEL0_MASK	0xf
+
+#define YAFFS_NTNODES_INTERNAL 		(YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS 	(YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK	0x7
+#define YAFFS_TNODES_MAX_LEVEL		6
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+#define YAFFS_BYTES_PER_SPARE		16
+#define YAFFS_BYTES_PER_CHUNK		512
+#define YAFFS_CHUNK_SIZE_SHIFT		9
+#define YAFFS_CHUNKS_PER_BLOCK		32
+#define YAFFS_BYTES_PER_BLOCK		(YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+#endif
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 	1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE	32
+
+#define YAFFS_MAX_CHUNK_ID		0x000FFFFF
+
+#define YAFFS_ALLOCATION_NOBJECTS	100
+#define YAFFS_ALLOCATION_NTNODES	100
+#define YAFFS_ALLOCATION_NLINKS		100
+
+#define YAFFS_NOBJECT_BUCKETS		256
+
+#define YAFFS_OBJECT_SPACE		0x40000
+#define YAFFS_MAX_OBJECT_ID		(YAFFS_OBJECT_SPACE -1)
+
+#define YAFFS_CHECKPOINT_VERSION 	4
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH		127
+#define YAFFS_MAX_ALIAS_LENGTH		79
+#else
+#define YAFFS_MAX_NAME_LENGTH		255
+#define YAFFS_MAX_ALIAS_LENGTH		159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH		15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT		1
+#define YAFFS_OBJECTID_LOSTNFOUND	2
+#define YAFFS_OBJECTID_UNLINKED		3
+#define YAFFS_OBJECTID_DELETED		4
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_SB_HEADER	0x10
+#define YAFFS_OBJECTID_CHECKPOINT_DATA	0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA  0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES	20
+
+#define YAFFS_N_TEMP_BUFFERS		6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS		(5*64)
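+/* i.e. 320 attempts: 10 blocks' worth of chunks on 32-page devices,
+ * 5 blocks' worth on 64-page devices.
+ */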
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
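+/*
+ * Sanity check on the figure above: 8 blocks per second for 15 years is
+ * roughly 8 * 31.5 million seconds * 15 ~= 3.8 billion sequence numbers,
+ * just under the ~4.0 billion available between
+ * YAFFS_LOWEST_SEQUENCE_NUMBER and YAFFS_HIGHEST_SEQUENCE_NUMBER.
+ */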
+#define YAFFS_LOWEST_SEQUENCE_NUMBER	0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER	0xEFFFFF00
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK	0xFFFF0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+	struct yaffs_obj *object;
+	int chunk_id;
+	int last_use;
+	int dirty;
+	int n_bytes;		/* Only valid if the cache is dirty */
+	int locked;		/* Can't push out or flush while locked. */
+	u8 *data;
+};
+
+/* Tags structures in RAM
+ * NB This uses bitfields. Bitfields should not straddle a u32 boundary, otherwise
+ * the structure size will get blown out.
+ */
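+/*
+ * For reference: in struct yaffs_tags below, chunk_id (20) + serial_number (2)
+ * + n_bytes_lsb (10) fill one 32-bit word, and obj_id (18) + ecc (12) +
+ * n_bytes_msb (2) fill the other, so the union is exactly 8 bytes.
+ */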
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+struct yaffs_tags {
+	unsigned chunk_id:20;
+	unsigned serial_number:2;
+	unsigned n_bytes_lsb:10;
+	unsigned obj_id:18;
+	unsigned ecc:12;
+	unsigned n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+	struct yaffs_tags as_tags;
+	u8 as_bytes[8];
+};
+
+#endif
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+	YAFFS_ECC_RESULT_UNKNOWN,
+	YAFFS_ECC_RESULT_NO_ERROR,
+	YAFFS_ECC_RESULT_FIXED,
+	YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+	YAFFS_OBJECT_TYPE_UNKNOWN,
+	YAFFS_OBJECT_TYPE_FILE,
+	YAFFS_OBJECT_TYPE_SYMLINK,
+	YAFFS_OBJECT_TYPE_DIRECTORY,
+	YAFFS_OBJECT_TYPE_HARDLINK,
+	YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+
+	unsigned validity0;
+	unsigned chunk_used;	/*  Status of the chunk: used or unused */
+	unsigned obj_id;	/* If 0 then this is not part of an object (unused) */
+	unsigned chunk_id;	/* If 0 then this is a header, else a data chunk */
+	unsigned n_bytes;	/* Only valid for data chunks */
+
+	/* The following stuff only has meaning when we read */
+	enum yaffs_ecc_result ecc_result;
+	unsigned block_bad;
+
+	/* YAFFS 1 stuff */
+	unsigned is_deleted;	/* The chunk is marked deleted */
+	unsigned serial_number;	/* Yaffs1 2-bit serial number */
+
+	/* YAFFS2 stuff */
+	unsigned seq_number;	/* The sequence number of this block */
+
+	/* Extra info if this is an object header (YAFFS2 only) */
+
+	unsigned extra_available;	/* There is extra info available if this is not zero */
+	unsigned extra_parent_id;	/* The parent object */
+	unsigned extra_is_shrink;	/* Is it a shrink header? */
+	unsigned extra_shadows;	/* Does this shadow another object? */
+
+	enum yaffs_obj_type extra_obj_type;	/* What object type? */
+
+	unsigned extra_length;	/* Length if it is a file */
+	unsigned extra_equiv_id;	/* Equivalent object Id if it is a hard link */
+
+	unsigned validity1;
+
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+	u8 tb0;
+	u8 tb1;
+	u8 tb2;
+	u8 tb3;
+	u8 page_status;		/* set to 0 to delete the chunk */
+	u8 block_status;
+	u8 tb4;
+	u8 tb5;
+	u8 ecc1[3];
+	u8 tb6;
+	u8 tb7;
+	u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+	struct yaffs_spare spare;
+	int eccres1;
+	int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+	YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+	YAFFS_BLOCK_STATE_SCANNING,
+	/* Being scanned */
+
+	YAFFS_BLOCK_STATE_NEEDS_SCANNING,
+	/* The block might have something on it (ie it is allocating or full, perhaps empty)
+	 * but it needs to be scanned to determine its true state.
+	 * This state is only valid during scanning.
+	 * NB We tolerate empty because the pre-scanner might be incapable of deciding.
+	 * However, if this state is returned on a YAFFS2 device, then we expect a sequence number.
+	 */
+
+	YAFFS_BLOCK_STATE_EMPTY,
+	/* This block is empty */
+
+	YAFFS_BLOCK_STATE_ALLOCATING,
+	/* This block is partially allocated.
+	 * At least one page holds valid data.
+	 * This is the one currently being used for page
+	 * allocation. Should never be more than one of these.
+	 * If a block is only partially allocated at mount it is treated as full.
+	 */
+
+	YAFFS_BLOCK_STATE_FULL,
+	/* All the pages in this block have been allocated.
+	 * If a block was only partially allocated when mounted we treat
+	 * it as fully allocated.
+	 */
+
+	YAFFS_BLOCK_STATE_DIRTY,
+	/* The block was full and now all chunks have been deleted.
+	 * Erase me, reuse me.
+	 */
+
+	YAFFS_BLOCK_STATE_CHECKPOINT,
+	/* This block is assigned to holding checkpoint data. */
+
+	YAFFS_BLOCK_STATE_COLLECTING,
+	/* This block is being garbage collected */
+
+	YAFFS_BLOCK_STATE_DEAD
+	    /* This block has failed and is not in use */
+};
+
+#define	YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+	int soft_del_pages:10;	/* number of soft deleted pages */
+	int pages_in_use:10;	/* number of pages in use */
+	unsigned block_state:4;	/* One of the above block states. NB use unsigned because enum is sometimes an int */
+	u32 needs_retiring:1;	/* Data has failed on this block, need to get valid data off */
+	/* and retire the block. */
+	u32 skip_erased_check:1;	/* If this is set we can skip the erased check on this block */
+	u32 gc_prioritise:1;	/* An ECC check or blank check has failed on this block.
+				   It should be prioritised for GC */
+	u32 chunk_error_strikes:3;	/* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+#ifdef CONFIG_YAFFS_YAFFS2
+	u32 has_shrink_hdr:1;	/* This block has at least one shrink object header */
+	u32 seq_number;		/* block sequence number for yaffs2 */
+#endif
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+	enum yaffs_obj_type type;
+
+	/* Apply to everything  */
+	int parent_obj_id;
+	u16 sum_no_longer_used;	/* checksum of name. No longer used */
+	YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+	/* The following apply to directories, files, symlinks - not hard links */
+	u32 yst_mode;		/* protection */
+
+	u32 yst_uid;
+	u32 yst_gid;
+	u32 yst_atime;
+	u32 yst_mtime;
+	u32 yst_ctime;
+
+	/* File size  applies to files only */
+	int file_size;
+
+	/* Equivalent object id applies to hard links only. */
+	int equiv_id;
+
+	/* Alias is for symlinks only. */
+	YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+	u32 yst_rdev;		/* device stuff for block and char devices (major/min) */
+
+	u32 win_ctime[2];
+	u32 win_atime[2];
+	u32 win_mtime[2];
+
+	u32 inband_shadowed_obj_id;
+	u32 inband_is_shrink;
+
+	u32 reserved[2];
+	int shadows_obj;	/* This object header shadows the specified object if > 0 */
+
+	/* is_shrink applies to object headers written when we shrink the file (ie resize) */
+	u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+	struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------  Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has children links)
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+	u32 file_size;
+	u32 scanned_size;
+	u32 shrink_size;
+	int top_level;
+	struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+	struct list_head children;	/* list of child links */
+	struct list_head dirty;	/* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+	YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+	struct yaffs_obj *equiv_obj;
+	u32 equiv_id;
+};
+
+union yaffs_obj_var {
+	struct yaffs_file_var file_variant;
+	struct yaffs_dir_var dir_variant;
+	struct yaffs_symlink_var symlink_variant;
+	struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+	u8 deleted:1;		/* This should only apply to unlinked files. */
+	u8 soft_del:1;		/* it has also been soft deleted */
+	u8 unlinked:1;		/* An unlinked file. The file should be in the unlinked directory. */
+	u8 fake:1;		/* A fake object has no presence on NAND. */
+	u8 rename_allowed:1;	/* Some objects are not allowed to be renamed. */
+	u8 unlink_allowed:1;
+	u8 dirty:1;		/* the object needs to be written to flash */
+	u8 valid:1;		/* When the file system is being loaded up, this
+				 * object might be created before the data
+				 * is available (ie. file data records appear before the header).
+				 */
+	u8 lazy_loaded:1;	/* This object has been lazy loaded and is missing some detail */
+
+	u8 defered_free:1;	/* For Linux kernel. Object is removed from NAND, but is
+				 * still in the inode cache. Freeing of the object is deferred
+				 * until the inode is released.
+				 */
+	u8 being_created:1;	/* This object is still being created so skip some checks. */
+	u8 is_shadowed:1;	/* This object is shadowed on the way to being renamed. */
+
+	u8 xattr_known:1;	/* We know if this object has xattribs or not. */
+	u8 has_xattr:1;		/* This object has xattribs. Valid if xattr_known. */
+
+	u8 serial;		/* serial number of chunk in NAND. Cached here */
+	u16 sum;		/* sum of the name to speed searching */
+
+	struct yaffs_dev *my_dev;	/* The device I'm on */
+
+	struct list_head hash_link;	/* list of objects in this hash bucket */
+
+	struct list_head hard_links;	/* all the equivalent hard linked objects */
+
+	/* directory structure stuff */
+	/* also used for linking up the free list */
+	struct yaffs_obj *parent;
+	struct list_head siblings;
+
+	/* Where's my object header in NAND? */
+	int hdr_chunk;
+
+	int n_data_chunks;	/* Number of data chunks attached to the file. */
+
+	u32 obj_id;		/* the object id value */
+
+	u32 yst_mode;
+
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+	YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+#endif
+
+#ifdef CONFIG_YAFFS_WINCE
+	u32 win_ctime[2];
+	u32 win_mtime[2];
+	u32 win_atime[2];
+#else
+	u32 yst_uid;
+	u32 yst_gid;
+	u32 yst_atime;
+	u32 yst_mtime;
+	u32 yst_ctime;
+#endif
+
+	u32 yst_rdev;
+
+	void *my_inode;
+
+	enum yaffs_obj_type variant_type;
+
+	union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+	struct list_head list;
+	int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+	int struct_type;
+	u32 obj_id;
+	u32 parent_id;
+	int hdr_chunk;
+	enum yaffs_obj_type variant_type:3;
+	u8 deleted:1;
+	u8 soft_del:1;
+	u8 unlinked:1;
+	u8 fake:1;
+	u8 rename_allowed:1;
+	u8 unlink_allowed:1;
+	u8 serial;
+	int n_data_chunks;
+	u32 size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few
+ */
+
+struct yaffs_buffer {
+	u8 *buffer;
+	int line;		/* track from whence this buffer was allocated */
+	int max_line;
+};
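These buffers are handed out and returned through yaffs_get_temp_buffer()/yaffs_release_temp_buffer(), declared further down in this header, with __LINE__ passed so the line field records the call site. A minimal usage sketch, assuming kernel string helpers via yportenv.h; example_scratch_pass() is hypothetical, not a YAFFS function:

#include "yportenv.h"
#include "yaffs_guts.h"

static void example_scratch_pass(struct yaffs_dev *dev)
{
	/* Borrow a chunk-sized working buffer; the call site is recorded
	 * in the buffer's line field for debugging. */
	u8 *buf = yaffs_get_temp_buffer(dev, __LINE__);

	memset(buf, 0xff, dev->data_bytes_per_chunk);
	/* ... use buf as scratch space for a read or verify pass ... */

	/* Always pair with a release, again tagging the call site. */
	yaffs_release_temp_buffer(dev, buf, __LINE__);
}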
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+	const YCHAR *name;
+
+	/*
+	 * Entry parameters set up way early. Yaffs sets up the rest.
+	 * The structure should be zeroed out before use so that unused
+	 * and default values are zero.
+	 */
+
+	int inband_tags;	/* Use inband tags */
+	u32 total_bytes_per_chunk;	/* Should be >= 512, does not need to be a power of 2 */
+	int chunks_per_block;	/* does not need to be a power of 2 */
+	int spare_bytes_per_chunk;	/* spare area size */
+	int start_block;	/* Start block we're allowed to use */
+	int end_block;		/* End block we're allowed to use */
+	int n_reserved_blocks;	/* We want this tuneable so that we can reduce */
+	/* reserved blocks on NOR and RAM. */
+
+	int n_caches;		/* If <= 0, then short op caching is disabled, else
+				 * the number of short op caches (don't use too many).
+				 * 10 to 20 is a good bet.
+				 */
+	int use_nand_ecc;	/* Flag to decide whether or not to use NAND ECC on data (yaffs1) */
+	int no_tags_ecc;	/* Flag to decide whether or not to do ECC on packed tags (yaffs2) */
+
+	int is_yaffs2;		/* Use yaffs2 mode on this device */
+
+	int empty_lost_n_found;	/* Auto-empty lost+found directory on mount */
+
+	int refresh_period;	/* How often we should check to do a block refresh */
+
+	/* Checkpoint control. Can be set before or after initialisation */
+	u8 skip_checkpt_rd;
+	u8 skip_checkpt_wr;
+
+	int enable_xattr;	/* Enable xattribs */
+
+	/* NAND access functions (Must be set before calling YAFFS) */
+
+	int (*write_chunk_fn) (struct yaffs_dev * dev,
+			       int nand_chunk, const u8 * data,
+			       const struct yaffs_spare * spare);
+	int (*read_chunk_fn) (struct yaffs_dev * dev,
+			      int nand_chunk, u8 * data,
+			      struct yaffs_spare * spare);
+	int (*erase_fn) (struct yaffs_dev * dev, int flash_block);
+	int (*initialise_flash_fn) (struct yaffs_dev * dev);
+	int (*deinitialise_flash_fn) (struct yaffs_dev * dev);
+
+#ifdef CONFIG_YAFFS_YAFFS2
+	int (*write_chunk_tags_fn) (struct yaffs_dev * dev,
+				    int nand_chunk, const u8 * data,
+				    const struct yaffs_ext_tags * tags);
+	int (*read_chunk_tags_fn) (struct yaffs_dev * dev,
+				   int nand_chunk, u8 * data,
+				   struct yaffs_ext_tags * tags);
+	int (*bad_block_fn) (struct yaffs_dev * dev, int block_no);
+	int (*query_block_fn) (struct yaffs_dev * dev, int block_no,
+			       enum yaffs_block_state * state,
+			       u32 * seq_number);
+#endif
+
+	/* The remove_obj_fn function must be supplied by OS flavours that
+	 * need it.
+	 * yaffs direct uses it to implement the faster readdir.
+	 * Linux uses it to protect the directory during unlocking.
+	 */
+	void (*remove_obj_fn) (struct yaffs_obj * obj);
+
+	/* Callback to mark the superblock dirty */
+	void (*sb_dirty_fn) (struct yaffs_dev * dev);
+
+	/*  Callback to control garbage collection. */
+	unsigned (*gc_control) (struct yaffs_dev * dev);
+
+	/* Debug control flags. Don't use unless you know what you're doing */
+	int use_header_file_size;	/* Flag to determine if we should use file sizes from the header */
+	int disable_lazy_load;	/* Disable lazy loading on this device */
+	int wide_tnodes_disabled;	/* Set to disable wide tnodes */
+	int disable_soft_del;	/* yaffs 1 only: Set to disable the use of softdeletion. */
+
+	int defered_dir_update;	/* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+	int auto_unicode;
+#endif
+	int always_check_erased;	/* Force chunk erased check always on */
+};
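As the comment at the top of the structure says, a caller zeroes everything, fills in the entry parameters and the NAND access callbacks, and then hands the device to yaffs_guts_initialise(). A hedged sketch of a yaffs2-mode setup using the MTD glue added later in this patch (assumes CONFIG_YAFFS_YAFFS2, YCHAR being plain char, and that dev->driver_context and dev->os_context have already been pointed at the MTD and Linux context as yaffs_linux.h expects):

#include "yportenv.h"
#include "yaffs_guts.h"
#include "yaffs_mtdif.h"
#include "yaffs_mtdif2.h"

static int example_setup(struct yaffs_dev *dev)
{
	struct yaffs_param *p = &dev->param;

	memset(p, 0, sizeof(*p));	/* unused and default values must be zero */

	p->name = "nand0";
	p->total_bytes_per_chunk = 2048;	/* >= 512, need not be a power of 2 */
	p->chunks_per_block = 64;
	p->start_block = 1;
	p->end_block = 1023;
	p->n_reserved_blocks = 5;
	p->n_caches = 10;		/* short op caches: 10 to 20 is a good bet */
	p->is_yaffs2 = 1;

	/* NAND access functions: here the MTD-backed ones from this patch */
	p->write_chunk_tags_fn = nandmtd2_write_chunk_tags;
	p->read_chunk_tags_fn = nandmtd2_read_chunk_tags;
	p->bad_block_fn = nandmtd2_mark_block_bad;
	p->query_block_fn = nandmtd2_query_block;
	p->erase_fn = nandmtd_erase_block;
	p->initialise_flash_fn = nandmtd_initialise;

	return yaffs_guts_initialise(dev);
}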
+
+struct yaffs_dev {
+	struct yaffs_param param;
+
+	/* Context storage. Holds extra OS specific data for this device */
+
+	void *os_context;
+	void *driver_context;
+
+	struct list_head dev_list;
+
+	/* Runtime parameters. Set up by YAFFS. */
+	int data_bytes_per_chunk;
+
+	/* Non-wide tnode stuff */
+	u16 chunk_grp_bits;	/* Number of bits that need to be resolved if
+				 * the tnodes are not wide enough.
+				 */
+	u16 chunk_grp_size;	/* == 2^chunk_grp_bits */
+
+	/* Stuff to support wide tnodes */
+	u32 tnode_width;
+	u32 tnode_mask;
+	u32 tnode_size;
+
+	/* Stuff for figuring out file offset to chunk conversions */
+	u32 chunk_shift;	/* Shift value */
+	u32 chunk_div;		/* Divisor after shifting: 1 for power-of-2 sizes */
+	u32 chunk_mask;		/* Mask to use for power-of-2 case */
+
+	int is_mounted;
+	int read_only;
+	int is_checkpointed;
+
+	/* Stuff to support block offsetting to support start block zero */
+	int internal_start_block;
+	int internal_end_block;
+	int block_offset;
+	int chunk_offset;
+
+	/* Runtime checkpointing stuff */
+	int checkpt_page_seq;	/* running sequence number of checkpoint pages */
+	int checkpt_byte_count;
+	int checkpt_byte_offs;
+	u8 *checkpt_buffer;
+	int checkpt_open_write;
+	int blocks_in_checkpt;
+	int checkpt_cur_chunk;
+	int checkpt_cur_block;
+	int checkpt_next_block;
+	int *checkpt_block_list;
+	int checkpt_max_blocks;
+	u32 checkpt_sum;
+	u32 checkpt_xor;
+
+	int checkpoint_blocks_required;	/* Number of blocks needed to store current checkpoint set */
+
+	/* Block Info */
+	struct yaffs_block_info *block_info;
+	u8 *chunk_bits;		/* bitmap of chunks in use */
+	unsigned block_info_alt:1;	/* was allocated using alternative strategy */
+	unsigned chunk_bits_alt:1;	/* was allocated using alternative strategy */
+	int chunk_bit_stride;	/* Number of bytes of chunk_bits per block.
+				 * Must be consistent with chunks_per_block.
+				 */
+
+	int n_erased_blocks;
+	int alloc_block;	/* Current block being allocated off */
+	u32 alloc_page;
+	int alloc_block_finder;	/* Used to search for next allocation block */
+
+	/* Object and Tnode memory management */
+	void *allocator;
+	int n_obj;
+	int n_tnodes;
+
+	int n_hardlinks;
+
+	struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+	u32 bucket_finder;
+
+	int n_free_chunks;
+
+	/* Garbage collection control */
+	u32 *gc_cleanup_list;	/* objects to delete at the end of a GC. */
+	u32 n_clean_ups;
+
+	unsigned has_pending_prioritised_gc;	/* We think this device might have pending prioritised gcs */
+	unsigned gc_disable;
+	unsigned gc_block_finder;
+	unsigned gc_dirtiest;
+	unsigned gc_pages_in_use;
+	unsigned gc_not_done;
+	unsigned gc_block;
+	unsigned gc_chunk;
+	unsigned gc_skip;
+
+	/* Special directories */
+	struct yaffs_obj *root_dir;
+	struct yaffs_obj *lost_n_found;
+
+	/* Buffer areas for storing data to recover from write failures TODO
+	 *      u8            buffered_data[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
+	 *      struct yaffs_spare buffered_spare[YAFFS_CHUNKS_PER_BLOCK];
+	 */
+
+	int buffered_block;	/* Which block is buffered here? */
+	int doing_buffered_block_rewrite;
+
+	struct yaffs_cache *cache;
+	int cache_last_use;
+
+	/* Stuff for background deletion and unlinked files. */
+	struct yaffs_obj *unlinked_dir;	/* Directory where unlinked and deleted files live. */
+	struct yaffs_obj *del_dir;	/* Directory where deleted objects are sent to disappear. */
+	struct yaffs_obj *unlinked_deletion;	/* Current file being background deleted. */
+	int n_deleted_files;	/* Count of files awaiting deletion; */
+	int n_unlinked_files;	/* Count of unlinked files. */
+	int n_bg_deletions;	/* Count of background deletions. */
+
+	/* Temporary buffer management */
+	struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+	int max_temp;
+	int temp_in_use;
+	int unmanaged_buffer_allocs;
+	int unmanaged_buffer_deallocs;
+
+	/* yaffs2 runtime stuff */
+	unsigned seq_number;	/* Sequence number of currently allocating block */
+	unsigned oldest_dirty_seq;
+	unsigned oldest_dirty_block;
+
+	/* Block refreshing */
+	int refresh_skip;	/* A skip down counter. Refresh happens when this gets to zero. */
+
+	/* Dirty directory handling */
+	struct list_head dirty_dirs;	/* List of dirty directories */
+
+	/* Statistics */
+	u32 n_page_writes;
+	u32 n_page_reads;
+	u32 n_erasures;
+	u32 n_erase_failures;
+	u32 n_gc_copies;
+	u32 all_gcs;
+	u32 passive_gc_count;
+	u32 oldest_dirty_gc_count;
+	u32 n_gc_blocks;
+	u32 bg_gcs;
+	u32 n_retired_writes;
+	u32 n_retired_blocks;
+	u32 n_ecc_fixed;
+	u32 n_ecc_unfixed;
+	u32 n_tags_ecc_fixed;
+	u32 n_tags_ecc_unfixed;
+	u32 n_deletions;
+	u32 n_unmarked_deletions;
+	u32 refresh_count;
+	u32 cache_hits;
+
+};
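The chunk_shift/chunk_div/chunk_mask fields above let YAFFS turn a file offset into a chunk number without a 64-bit divide: shift first, then divide by chunk_div only when the chunk size is not a power of two. A sketch of the conversion these fields support (illustrative only, not the internal YAFFS helper), assuming they have been filled in by initialisation:

#include "yaffs_guts.h"

static void example_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
				  int *chunk_out, u32 *offset_out)
{
	int chunk = (int)(addr >> dev->chunk_shift);

	if (dev->chunk_div == 1) {
		/* Power-of-two chunk size: the mask gives the in-chunk offset. */
		*offset_out = (u32)(addr & dev->chunk_mask);
	} else {
		/* Non-power-of-two: finish with a 32-bit divide, then take
		 * the remainder against the real chunk size. */
		chunk /= dev->chunk_div;
		*offset_out = (u32)(addr - (loff_t)chunk * dev->data_bytes_per_chunk);
	}
	*chunk_out = chunk;
}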
+
+/* The CheckpointDevice structure holds the device information that changes at runtime and
+ * must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+	int struct_type;
+	int n_erased_blocks;
+	int alloc_block;	/* Current block being allocated off */
+	u32 alloc_page;
+	int n_free_chunks;
+
+	int n_deleted_files;	/* Count of files awaiting deletion; */
+	int n_unlinked_files;	/* Count of unlinked files. */
+	int n_bg_deletions;	/* Count of background deletions. */
+
+	/* yaffs2 runtime stuff */
+	unsigned seq_number;	/* Sequence number of currently allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+	int struct_type;
+	u32 magic;
+	u32 version;
+	u32 head;
+};
+
+struct yaffs_shadow_fixer {
+	int obj_id;
+	int shadowed_id;
+	struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+	int set;		/* If 0 then this is a deletion */
+	const YCHAR *name;
+	const void *data;
+	int size;
+	int flags;
+	int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+		     struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+int yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+		  int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+		  int n_bytes, int write_through);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+				    const YCHAR * name, u32 mode, u32 uid,
+				    u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+				   u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+				     const YCHAR * name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+				 struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+				       const YCHAR * name, u32 mode, u32 uid,
+				       u32 gid, const YCHAR * alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+				       const YCHAR * name, u32 mode, u32 uid,
+				       u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+		      const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+		      int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump  */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+		     int lyn);
+int yaffs_check_ff(u8 * buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+			      struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev, int line_no);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+						 int number,
+						 enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+			    int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+				const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR * str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name,
+		    int force, int is_shrink, int shadows,
+		    struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+			       int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+					   struct yaffs_file_var *file_struct,
+					   u32 chunk_id,
+					   struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+		     int n_bytes, int write_through);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+				       struct yaffs_file_var *file_struct,
+				       u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+			 unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+#endif
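As a taste of the API declared above, here is a hedged sketch that creates a file under the root directory, writes a buffer to it and flushes it out. It assumes YCHAR is plain char and that yaffs_wr_file() returns the number of bytes written; example_write_file() is not part of YAFFS:

#include "yaffs_guts.h"

static int example_write_file(struct yaffs_dev *dev, const u8 *data, int n_bytes)
{
	struct yaffs_obj *root = yaffs_root(dev);
	struct yaffs_obj *obj;

	/* 0100644: regular file, rw-r--r-- */
	obj = yaffs_create_file(root, "example", 0100644, 0, 0);
	if (!obj)
		return YAFFS_FAIL;

	/* write_through == 0: let the short op cache absorb the write */
	if (yaffs_wr_file(obj, data, 0, n_bytes, 0) != n_bytes)
		return YAFFS_FAIL;

	/* update_time == 1, data_sync == 0 */
	return yaffs_flush_file(obj, 1, 0);
}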
diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
new file mode 100644
index 0000000..3b508cb
--- /dev/null
+++ b/fs/yaffs2/yaffs_linux.h
@@ -0,0 +1,41 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+	struct list_head context_list;	/* List of these we have mounted */
+	struct yaffs_dev *dev;
+	struct super_block *super;
+	struct task_struct *bg_thread;	/* Background thread for this device */
+	int bg_running;
+	struct mutex gross_lock;	/* Gross locking mutex*/
+	u8 *spare_buffer;	/* For mtdif2 use. Don't know the size of the buffer
+				 * at compile time so we have to allocate it.
+				 */
+	struct list_head search_contexts;
+	void (*put_super_fn) (struct super_block * sb);
+
+	struct task_struct *readdir_process;
+	unsigned mount_id;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 0000000..7cf53b3
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,54 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+
+#include "yaffs_linux.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	u32 addr =
+	    ((loff_t) block_no) * dev->param.total_bytes_per_chunk
+	    * dev->param.chunks_per_block;
+	struct erase_info ei;
+
+	int retval = 0;
+
+	ei.mtd = mtd;
+	ei.addr = addr;
+	ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+	ei.time = 1000;
+	ei.retries = 2;
+	ei.callback = NULL;
+	ei.priv = (u_long) dev;
+
+	retval = mtd->erase(mtd, &ei);
+
+	if (retval == 0)
+		return YAFFS_OK;
+	else
+		return YAFFS_FAIL;
+}
+
+int nandmtd_initialise(struct yaffs_dev *dev)
+{
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 0000000..6665074
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no);
+int nandmtd_initialise(struct yaffs_dev *dev);
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif1.c b/fs/yaffs2/yaffs_mtdif1.c
new file mode 100644
index 0000000..5108369
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.c
@@ -0,0 +1,330 @@
+/*
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This module provides the interface between yaffs_nand.c and the
+ * MTD API.  This version is used when the MTD interface supports the
+ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+ * and we have a small-page NAND device.
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+ * and the yaffs_tags compatibility functions in yaffs_tagscompat.c that are
+ * called in yaffs_mtdif.c when the function pointers are NULL.
+ * We assume the MTD layer is performing ECC (use_nand_ecc is true).
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_packedtags1.h"
+#include "yaffs_tagscompat.h"	/* for yaffs_calc_tags_ecc */
+#include "yaffs_linux.h"
+
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+#include "linux/mtd/mtd.h"
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+# define YTAG1_SIZE 8
+#else
+# define YTAG1_SIZE 9
+#endif
+
+/* Write a chunk (page) of data to NAND.
+ *
+ * Caller always provides ExtendedTags data which are converted to a more
+ * compact (packed) form for storage in NAND.  A mini-ECC runs over the
+ * contents of the tags meta-data and is used to validate the tags when read.
+ *
+ *  - Pack ExtendedTags to packed_tags1 form
+ *  - Compute mini-ECC for packed_tags1
+ *  - Write data and packed tags to NAND.
+ *
+ * Note: Due to the use of the packed_tags1 meta-data which does not include
+ * a full sequence number (as found in the larger packed_tags2 form) it is
+ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+ * discarded and dirty.  This is not ideal: newer NAND parts are supposed
+ * to be written just once.  When Yaffs performs this operation, this
+ * function is called with a NULL data pointer -- calling MTD write_oob
+ * without data is valid usage (2.6.17).
+ *
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev,
+			      int nand_chunk, const u8 * data,
+			      const struct yaffs_ext_tags *etags)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int chunk_bytes = dev->data_bytes_per_chunk;
+	loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+	struct mtd_oob_ops ops;
+	struct yaffs_packed_tags1 pt1;
+	int retval;
+
+	/* we assume that packed_tags1 and struct yaffs_tags are compatible */
+	compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
+	compile_time_assertion(sizeof(struct yaffs_tags) == 8);
+
+	yaffs_pack_tags1(&pt1, etags);
+	yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);
+
+	/* When deleting a chunk, the upper layer provides only skeletal
+	 * etags, with only is_deleted set.  However, we need to update the
+	 * tags, not erase them completely.  So we use the NAND write property
+	 * that only zeroed-bits stick and set tag bytes to all-ones and
+	 * zero just the (not) deleted bit.
+	 */
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+	if (etags->is_deleted) {
+		memset(&pt1, 0xff, 8);
+		/* clear delete status bit to indicate deleted */
+		pt1.deleted = 0;
+	}
+#else
+	((u8 *) & pt1)[8] = 0xff;
+	if (etags->is_deleted) {
+		memset(&pt1, 0xff, 8);
+		/* zero page_status byte to indicate deleted */
+		((u8 *) & pt1)[8] = 0;
+	}
+#endif
+
+	memset(&ops, 0, sizeof(ops));
+	ops.mode = MTD_OOB_AUTO;
+	ops.len = (data) ? chunk_bytes : 0;
+	ops.ooblen = YTAG1_SIZE;
+	ops.datbuf = (u8 *) data;
+	ops.oobbuf = (u8 *) & pt1;
+
+	retval = mtd->write_oob(mtd, addr, &ops);
+	if (retval) {
+		yaffs_trace(YAFFS_TRACE_MTD,
+			"write_oob failed, chunk %d, mtd error %d",
+			nand_chunk, retval);
+	}
+	return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Return with empty ExtendedTags but add ecc_result.
+ */
+static int rettags(struct yaffs_ext_tags *etags, int ecc_result, int retval)
+{
+	if (etags) {
+		memset(etags, 0, sizeof(*etags));
+		etags->ecc_result = ecc_result;
+	}
+	return retval;
+}
+
+/* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+ * all members except ecc_result and block_bad are zeroed.
+ *
+ *  - Check ECC results for data (if applicable)
+ *  - Check for blank/erased block (return empty ExtendedTags if blank)
+ *  - Check the packed_tags1 mini-ECC (correct if necessary/possible)
+ *  - Convert packed_tags1 to ExtendedTags
+ *  - Update ecc_result and block_bad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev,
+			     int nand_chunk, u8 * data,
+			     struct yaffs_ext_tags *etags)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int chunk_bytes = dev->data_bytes_per_chunk;
+	loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+	int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+	struct mtd_oob_ops ops;
+	struct yaffs_packed_tags1 pt1;
+	int retval;
+	int deleted;
+
+	memset(&ops, 0, sizeof(ops));
+	ops.mode = MTD_OOB_AUTO;
+	ops.len = (data) ? chunk_bytes : 0;
+	ops.ooblen = YTAG1_SIZE;
+	ops.datbuf = data;
+	ops.oobbuf = (u8 *) & pt1;
+
+	/* Read page and oob using MTD.
+	 * Check status and determine ECC result.
+	 */
+	retval = mtd->read_oob(mtd, addr, &ops);
+	if (retval) {
+		yaffs_trace(YAFFS_TRACE_MTD,
+			"read_oob failed, chunk %d, mtd error %d",
+			nand_chunk, retval);
+	}
+
+	switch (retval) {
+	case 0:
+		/* no error */
+		break;
+
+	case -EUCLEAN:
+		/* MTD's ECC fixed the data */
+		eccres = YAFFS_ECC_RESULT_FIXED;
+		dev->n_ecc_fixed++;
+		break;
+
+	case -EBADMSG:
+		/* MTD's ECC could not fix the data */
+		dev->n_ecc_unfixed++;
+		/* fall into... */
+	default:
+		rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+		etags->block_bad = (mtd->block_isbad) (mtd, addr);
+		return YAFFS_FAIL;
+	}
+
+	/* Check for a blank/erased chunk.
+	 */
+	if (yaffs_check_ff((u8 *) & pt1, 8)) {
+		/* when blank, upper layers want ecc_result to be <= NO_ERROR */
+		return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+	}
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+	/* Read deleted status (bit) then return it to its non-deleted
+	 * state before performing tags mini-ECC check. pt1.deleted is
+	 * inverted.
+	 */
+	deleted = !pt1.deleted;
+	pt1.deleted = 1;
+#else
+	deleted = (yaffs_count_bits(((u8 *) & pt1)[8]) < 7);
+#endif
+
+	/* Check the packed tags mini-ECC and correct if necessary/possible.
+	 */
+	retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
+	switch (retval) {
+	case 0:
+		/* no tags error, use MTD result */
+		break;
+	case 1:
+		/* recovered tags-ECC error */
+		dev->n_tags_ecc_fixed++;
+		if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
+			eccres = YAFFS_ECC_RESULT_FIXED;
+		break;
+	default:
+		/* unrecovered tags-ECC error */
+		dev->n_tags_ecc_unfixed++;
+		return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+	}
+
+	/* Unpack the tags to extended form and set ECC result.
+	 * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
+	 */
+	pt1.should_be_ff = 0xFFFFFFFF;
+	yaffs_unpack_tags1(etags, &pt1);
+	etags->ecc_result = eccres;
+
+	/* Set deleted state */
+	etags->is_deleted = deleted;
+	return YAFFS_OK;
+}
+
+/* Mark a block bad.
+ *
+ * This is a persistent state.
+ * Use of this function should be rare.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+	int retval;
+
+	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+		"marking block %d bad", block_no);
+
+	retval = mtd->block_markbad(mtd, (loff_t) blocksize * block_no);
+	return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Check any MTD prerequisites.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int nandmtd1_test_prerequists(struct mtd_info *mtd)
+{
+	/* 2.6.18 has mtd->ecclayout->oobavail */
+	/* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+	int oobavail = mtd->ecclayout->oobavail;
+
+	if (oobavail < YTAG1_SIZE) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"mtd device has only %d bytes for tags, need %d",
+			oobavail, YTAG1_SIZE);
+		return YAFFS_FAIL;
+	}
+	return YAFFS_OK;
+}
+
+/* Query for the current state of a specific block.
+ *
+ * Examine the tags of the first chunk of the block and return the state:
+ *  - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+ *  - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+ *  - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+ *
+ * Always returns YAFFS_OK.
+ */
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+			 enum yaffs_block_state *state_ptr, u32 * seq_ptr)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int chunk_num = block_no * dev->param.chunks_per_block;
+	loff_t addr = (loff_t) chunk_num * dev->data_bytes_per_chunk;
+	struct yaffs_ext_tags etags;
+	int state = YAFFS_BLOCK_STATE_DEAD;
+	int seqnum = 0;
+	int retval;
+
+	/* We don't yet have a good place to test for MTD config prerequisites.
+	 * Do it here as we are called during the initial scan.
+	 */
+	if (nandmtd1_test_prerequists(mtd) != YAFFS_OK)
+		return YAFFS_FAIL;
+
+	retval = nandmtd1_read_chunk_tags(dev, chunk_num, NULL, &etags);
+	etags.block_bad = (mtd->block_isbad) (mtd, addr);
+	if (etags.block_bad) {
+		yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+			"block %d is marked bad", block_no);
+		state = YAFFS_BLOCK_STATE_DEAD;
+	} else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+		/* bad tags, need to look more closely */
+		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+	} else if (etags.chunk_used) {
+		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+		seqnum = etags.seq_number;
+	} else {
+		state = YAFFS_BLOCK_STATE_EMPTY;
+	}
+
+	*state_ptr = state;
+	*seq_ptr = seqnum;
+
+	/* query always succeeds */
+	return YAFFS_OK;
+}
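For small-page parts the Linux glue (not part of this hunk) would point the same yaffs_param hooks used in the earlier setup sketch at the functions above, together with the generic erase/init helpers from yaffs_mtdif.c. A minimal, hypothetical wiring, again assuming CONFIG_YAFFS_YAFFS2 so the tag-level hooks exist:

#include "yaffs_guts.h"
#include "yaffs_mtdif.h"
#include "yaffs_mtdif1.h"

static void example_use_mtdif1(struct yaffs_dev *dev)
{
	/* Tags-level access via the packed_tags1 path in this file */
	dev->param.write_chunk_tags_fn = nandmtd1_write_chunk_tags;
	dev->param.read_chunk_tags_fn = nandmtd1_read_chunk_tags;
	dev->param.bad_block_fn = nandmtd1_mark_block_bad;
	dev->param.query_block_fn = nandmtd1_query_block;

	/* Block erase and one-time init from yaffs_mtdif.c */
	dev->param.erase_fn = nandmtd_erase_block;
	dev->param.initialise_flash_fn = nandmtd_initialise;
}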
diff --git a/fs/yaffs2/yaffs_mtdif1.h b/fs/yaffs2/yaffs_mtdif1.h
new file mode 100644
index 0000000..07ce452
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF1_H__
+#define __YAFFS_MTDIF1_H__
+
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+			      const u8 * data,
+			      const struct yaffs_ext_tags *tags);
+
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+			     u8 * data, struct yaffs_ext_tags *tags);
+
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no);
+
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+			 enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif2.c b/fs/yaffs2/yaffs_mtdif2.c
new file mode 100644
index 0000000..d1643df
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.c
@@ -0,0 +1,225 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* mtd interface for YAFFS2 */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_mtdif2.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_linux.h"
+
+/* NB For use with inband tags....
+ * We assume that the data buffer is of size total_bytes_per_chunk so that we can also
+ * use it to load the tags.
+ */
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+			      const u8 * data,
+			      const struct yaffs_ext_tags *tags)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	struct mtd_oob_ops ops;
+	int retval = 0;
+
+	loff_t addr;
+
+	struct yaffs_packed_tags2 pt;
+
+	int packed_tags_size =
+	    dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+	void *packed_tags_ptr =
+	    dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"nandmtd2_write_chunk_tags chunk %d data %p tags %p",
+		nand_chunk, data, tags);
+
+	addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+	/* For yaffs2 writing there must be both data and tags.
+	 * If we're using inband tags, then the tags are stuffed into
+	 * the end of the data buffer.
+	 */
+	if (!data || !tags)
+		BUG();
+	else if (dev->param.inband_tags) {
+		struct yaffs_packed_tags2_tags_only *pt2tp;
+		pt2tp =
+		    (struct yaffs_packed_tags2_tags_only *)(data +
+							    dev->
+							    data_bytes_per_chunk);
+		yaffs_pack_tags2_tags_only(pt2tp, tags);
+	} else {
+		yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+        }
+
+	ops.mode = MTD_OOB_AUTO;
+	ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
+	ops.len = dev->param.total_bytes_per_chunk;
+	ops.ooboffs = 0;
+	ops.datbuf = (u8 *) data;
+	ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
+	retval = mtd->write_oob(mtd, addr, &ops);
+
+	if (retval == 0)
+		return YAFFS_OK;
+	else
+		return YAFFS_FAIL;
+}
+
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+			     u8 * data, struct yaffs_ext_tags *tags)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	struct mtd_oob_ops ops;
+
+	size_t dummy;
+	int retval = 0;
+	int local_data = 0;
+
+	loff_t addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+	struct yaffs_packed_tags2 pt;
+
+	int packed_tags_size =
+	    dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+	void *packed_tags_ptr =
+	    dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"nandmtd2_read_chunk_tags chunk %d data %p tags %p",
+		nand_chunk, data, tags);
+
+	if (dev->param.inband_tags) {
+
+		if (!data) {
+			local_data = 1;
+			data = yaffs_get_temp_buffer(dev, __LINE__);
+		}
+
+	}
+
+	if (dev->param.inband_tags || (data && !tags))
+		retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+				   &dummy, data);
+	else if (tags) {
+		ops.mode = MTD_OOB_AUTO;
+		ops.ooblen = packed_tags_size;
+		ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+		ops.ooboffs = 0;
+		ops.datbuf = data;
+		ops.oobbuf = yaffs_dev_to_lc(dev)->spare_buffer;
+		retval = mtd->read_oob(mtd, addr, &ops);
+	}
+
+	if (dev->param.inband_tags) {
+		if (tags) {
+			struct yaffs_packed_tags2_tags_only *pt2tp;
+			pt2tp =
+			    (struct yaffs_packed_tags2_tags_only *)&data[dev->
+									 data_bytes_per_chunk];
+			yaffs_unpack_tags2_tags_only(tags, pt2tp);
+		}
+	} else {
+		if (tags) {
+			memcpy(packed_tags_ptr,
+			       yaffs_dev_to_lc(dev)->spare_buffer,
+			       packed_tags_size);
+			yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+		}
+	}
+
+	if (local_data)
+		yaffs_release_temp_buffer(dev, data, __LINE__);
+
+	if (tags && retval == -EBADMSG
+	    && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+		tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+		dev->n_ecc_unfixed++;
+	}
+	if (tags && retval == -EUCLEAN
+	    && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+		tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+		dev->n_ecc_fixed++;
+	}
+	if (retval == 0)
+		return YAFFS_OK;
+	else
+		return YAFFS_FAIL;
+}
+
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int retval;
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"nandmtd2_mark_block_bad %d", block_no);
+
+	retval =
+	    mtd->block_markbad(mtd,
+			       block_no * dev->param.chunks_per_block *
+			       dev->param.total_bytes_per_chunk);
+
+	if (retval == 0)
+		return YAFFS_OK;
+	else
+		return YAFFS_FAIL;
+
+}
+
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+			 enum yaffs_block_state *state, u32 * seq_number)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int retval;
+
+	yaffs_trace(YAFFS_TRACE_MTD, "nandmtd2_query_block %d", block_no);
+	retval =
+	    mtd->block_isbad(mtd,
+			     block_no * dev->param.chunks_per_block *
+			     dev->param.total_bytes_per_chunk);
+
+	if (retval) {
+		yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+		*state = YAFFS_BLOCK_STATE_DEAD;
+		*seq_number = 0;
+	} else {
+		struct yaffs_ext_tags t;
+		nandmtd2_read_chunk_tags(dev, block_no *
+					 dev->param.chunks_per_block, NULL, &t);
+
+		if (t.chunk_used) {
+			*seq_number = t.seq_number;
+			*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+		} else {
+			*seq_number = 0;
+			*state = YAFFS_BLOCK_STATE_EMPTY;
+		}
+	}
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"block query returns seq %d state %d", *seq_number, *state);
+
+	if (retval == 0)
+		return YAFFS_OK;
+	else
+		return YAFFS_FAIL;
+}
+
diff --git a/fs/yaffs2/yaffs_mtdif2.h b/fs/yaffs2/yaffs_mtdif2.h
new file mode 100644
index 0000000..d821126
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF2_H__
+#define __YAFFS_MTDIF2_H__
+
+#include "yaffs_guts.h"
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+			      const u8 * data,
+			      const struct yaffs_ext_tags *tags);
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+			     u8 * data, struct yaffs_ext_tags *tags);
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no);
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+			 enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
new file mode 100644
index 0000000..daa36f9
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,201 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number of values and fits
+ * into a small finite buffer.
+ *
+ * Each attribute is stored as a record:
+ *  sizeof(int) bytes   record size.
+ *  strnlen+1 bytes     name, null terminated.
+ *  nbytes              value.
+ *  ----------
+ *  total size, stored in the record size field.
+ *
+ * This code has not been tested with unicode yet.
+ */
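As a worked example of the layout above, assuming a 4-byte int and single-byte YCHAR: storing the name "user.x" with a 3-byte value produces a record of sizeof(int) + 6 + 1 + 3 = 14 bytes, that 14 is the value written into the record's leading size field, and the next record starts immediately after it.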
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR * name,
+		     int *exist_size)
+{
+	int pos = 0;
+	int size;
+
+	memcpy(&size, xb, sizeof(int));
+	while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+		if (strncmp
+		    ((YCHAR *) (xb + pos + sizeof(int)), name, size) == 0) {
+			if (exist_size)
+				*exist_size = size;
+			return pos;
+		}
+		pos += size;
+		if (pos < xb_size - sizeof(int))
+			memcpy(&size, xb + pos, sizeof(int));
+		else
+			size = 0;
+	}
+	if (exist_size)
+		*exist_size = 0;
+	return -1;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+	int pos = 0;
+	int size;
+
+	memcpy(&size, xb + pos, sizeof(int));
+	while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+		pos += size;
+		if (pos < xb_size - sizeof(int))
+			memcpy(&size, xb + pos, sizeof(int));
+		else
+			size = 0;
+	}
+	return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR * name)
+{
+	int pos = nval_find(xb, xb_size, name, NULL);
+	int size;
+
+	if (pos >= 0 && pos < xb_size) {
+		/* Find size, shift rest over this record, then zero out the rest of buffer */
+		memcpy(&size, xb + pos, sizeof(int));
+		memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+		memset(xb + (xb_size - size), 0, size);
+		return 0;
+	} else {
+		return -ENODATA;
+        }
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+	     int bsize, int flags)
+{
+	int pos;
+	int namelen = strnlen(name, xb_size);
+	int reclen;
+	int size_exist = 0;
+	int space;
+	int start;
+
+	pos = nval_find(xb, xb_size, name, &size_exist);
+
+	if (flags & XATTR_CREATE && pos >= 0)
+		return -EEXIST;
+	if (flags & XATTR_REPLACE && pos < 0)
+		return -ENODATA;
+
+	start = nval_used(xb, xb_size);
+	space = xb_size - start + size_exist;
+
+	reclen = (sizeof(int) + namelen + 1 + bsize);
+
+	if (reclen > space)
+		return -ENOSPC;
+
+	if (pos >= 0) {
+		nval_del(xb, xb_size, name);
+		start = nval_used(xb, xb_size);
+	}
+
+	pos = start;
+
+	memcpy(xb + pos, &reclen, sizeof(int));
+	pos += sizeof(int);
+	strncpy((YCHAR *) (xb + pos), name, reclen);
+	pos += (namelen + 1);
+	memcpy(xb + pos, buf, bsize);
+	return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+	     int bsize)
+{
+	int pos = nval_find(xb, xb_size, name, NULL);
+	int size;
+
+	if (pos >= 0 && pos < xb_size) {
+
+		memcpy(&size, xb + pos, sizeof(int));
+		pos += sizeof(int);	/* advance past record length */
+		size -= sizeof(int);
+
+		/* Advance over name string */
+		while (xb[pos] && size > 0 && pos < xb_size) {
+			pos++;
+			size--;
+		}
+		/* Advance over NUL */
+		pos++;
+		size--;
+
+		if (size <= bsize) {
+			memcpy(buf, xb + pos, size);
+			return size;
+		}
+
+	}
+	if (pos >= 0)
+		return -ERANGE;
+	else
+		return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+	int pos = 0;
+	int size;
+	int name_len;
+	int ncopied = 0;
+	int filled = 0;
+
+	memcpy(&size, xb + pos, sizeof(int));
+	while (size > sizeof(int) && size <= xb_size && (pos + size) < xb_size
+	       && !filled) {
+		pos += sizeof(int);
+		size -= sizeof(int);
+		name_len = strnlen((YCHAR *) (xb + pos), size);
+		if (ncopied + name_len + 1 < bsize) {
+			memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+			buf += name_len;
+			*buf = '\0';
+			buf++;
+			if (sizeof(YCHAR) > 1) {
+				*buf = '\0';
+				buf++;
+			}
+			ncopied += (name_len + 1);
+		} else {
+			filled = 1;
+                }
+		pos += size;
+		if (pos < xb_size - sizeof(int))
+			memcpy(&size, xb + pos, sizeof(int));
+		else
+			size = 0;
+	}
+	return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+	return nval_used(xb, xb_size) > 0;
+}
diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
new file mode 100644
index 0000000..2bb02b6
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+	     int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+	     int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
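A hedged round-trip sketch of this API on an ordinary zeroed buffer (assumes YCHAR is plain char; flags of 0 means neither XATTR_CREATE nor XATTR_REPLACE, so the set either creates or replaces; example_nameval() is illustrative only):

#include "yaffs_nameval.h"

static int example_nameval(void)
{
	char xb[256] = { 0 };	/* backing store for the name-value records */
	char out[16];
	int n;

	/* Store a 3-byte value under "user.x". */
	if (nval_set(xb, sizeof(xb), "user.x", "abc", 3, 0) < 0)
		return -1;

	/* Read it back: returns the value size (3); out is not NUL-terminated. */
	n = nval_get(xb, sizeof(xb), "user.x", out, sizeof(out));

	return (n == 3 && nval_hasvalues(xb, sizeof(xb))) ? 0 : -1;
}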
diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 0000000..e816cab
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,127 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsvalidity.h"
+
+#include "yaffs_getblockinfo.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+			     u8 * buffer, struct yaffs_ext_tags *tags)
+{
+	int result;
+	struct yaffs_ext_tags local_tags;
+
+	int realigned_chunk = nand_chunk - dev->chunk_offset;
+
+	dev->n_page_reads++;
+
+	/* If there are no tags provided, use local tags to get prioritised gc working */
+	if (!tags)
+		tags = &local_tags;
+
+	if (dev->param.read_chunk_tags_fn)
+		result =
+		    dev->param.read_chunk_tags_fn(dev, realigned_chunk, buffer,
+						  tags);
+	else
+		result = yaffs_tags_compat_rd(dev,
+					      realigned_chunk, buffer, tags);
+	if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+		struct yaffs_block_info *bi;
+		bi = yaffs_get_block_info(dev,
+					  nand_chunk /
+					  dev->param.chunks_per_block);
+		yaffs_handle_chunk_error(dev, bi);
+	}
+
+	return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+			     int nand_chunk,
+			     const u8 * buffer, struct yaffs_ext_tags *tags)
+{
+
+	dev->n_page_writes++;
+
+	nand_chunk -= dev->chunk_offset;
+
+	if (tags) {
+		tags->seq_number = dev->seq_number;
+		tags->chunk_used = 1;
+		if (!yaffs_validate_tags(tags)) {
+			yaffs_trace(YAFFS_TRACE_ERROR, "Writing uninitialised tags");
+			YBUG();
+		}
+		yaffs_trace(YAFFS_TRACE_WRITE,
+			"Writing chunk %d tags %d %d",
+			nand_chunk, tags->obj_id, tags->chunk_id);
+	} else {
+		yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+		YBUG();
+	}
+
+	if (dev->param.write_chunk_tags_fn)
+		return dev->param.write_chunk_tags_fn(dev, nand_chunk, buffer,
+						      tags);
+	else
+		return yaffs_tags_compat_wr(dev, nand_chunk, buffer, tags);
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+	block_no -= dev->block_offset;
+
+	if (dev->param.bad_block_fn)
+		return dev->param.bad_block_fn(dev, block_no);
+	else
+		return yaffs_tags_compat_mark_bad(dev, block_no);
+}
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+				 int block_no,
+				 enum yaffs_block_state *state,
+				 u32 * seq_number)
+{
+	block_no -= dev->block_offset;
+
+	if (dev->param.query_block_fn)
+		return dev->param.query_block_fn(dev, block_no, state,
+						 seq_number);
+	else
+		return yaffs_tags_compat_query_block(dev, block_no,
+						     state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block)
+{
+	int result;
+
+	flash_block -= dev->block_offset;
+
+	dev->n_erasures++;
+
+	result = dev->param.erase_fn(dev, flash_block);
+
+	return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+	if (dev->param.initialise_flash_fn)
+		return dev->param.initialise_flash_fn(dev);
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 0000000..543f198
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,38 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+			     u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+			     int nand_chunk,
+			     const u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+				 int block_no,
+				 enum yaffs_block_state *state,
+				 unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 0000000..a77f095
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,53 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+		      const struct yaffs_ext_tags *t)
+{
+	pt->chunk_id = t->chunk_id;
+	pt->serial_number = t->serial_number;
+	pt->n_bytes = t->n_bytes;
+	pt->obj_id = t->obj_id;
+	pt->ecc = 0;
+	pt->deleted = (t->is_deleted) ? 0 : 1;
+	pt->unused_stuff = 0;
+	pt->should_be_ff = 0xFFFFFFFF;
+
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+			const struct yaffs_packed_tags1 *pt)
+{
+	static const u8 all_ff[] =
+	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff
+	};
+
+	if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+		t->block_bad = 0;
+		if (pt->should_be_ff != 0xFFFFFFFF)
+			t->block_bad = 1;
+		t->chunk_used = 1;
+		t->obj_id = pt->obj_id;
+		t->chunk_id = pt->chunk_id;
+		t->n_bytes = pt->n_bytes;
+		t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+		t->is_deleted = (pt->deleted) ? 0 : 1;
+		t->serial_number = pt->serial_number;
+	} else {
+		memset(t, 0, sizeof(struct yaffs_ext_tags));
+	}
+}
diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 0000000..d6861ff
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+	unsigned chunk_id:20;
+	unsigned serial_number:2;
+	unsigned n_bytes:10;
+	unsigned obj_id:18;
+	unsigned ecc:12;
+	unsigned deleted:1;
+	unsigned unused_stuff:1;
+	unsigned should_be_ff;
+
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+		      const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+			const struct yaffs_packed_tags1 *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 0000000..8e7fea3
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,196 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_tagsvalidity.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed scanning.
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG	0x80000000
+#define EXTRA_SHRINK_FLAG	0x40000000
+#define EXTRA_SHADOWS_FLAG	0x20000000
+#define EXTRA_SPARE_FLAGS	0x10000000
+
+#define ALL_EXTRA_FLAGS		0xF0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK  ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
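Worked example of this packing: for an object header chunk (chunk_id 0 with extra info available) whose parent is object 0x103 and which records a shrink, the packed chunk_id becomes EXTRA_HEADER_INFO_FLAG | EXTRA_SHRINK_FLAG | 0x103 = 0xC0000103, the object type is OR'd into the top four bits of obj_id, and n_bytes carries the file length (or the equivalent object id for a hard link). Unpacking recovers the parent id with chunk_id & ~ALL_EXTRA_FLAGS, i.e. masking with 0x0FFFFFFF.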
+
+static void yaffs_dump_packed_tags2_tags_only(const struct
+					      yaffs_packed_tags2_tags_only *ptt)
+{
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"packed tags obj %d chunk %d byte %d seq %d",
+		ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+	yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"ext.tags eccres %d blkbad %d chused %d obj %d chunk %d byte %d del %d ser %d seq %d",
+		t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+		t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+		t->seq_number);
+
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+				const struct yaffs_ext_tags *t)
+{
+	ptt->chunk_id = t->chunk_id;
+	ptt->seq_number = t->seq_number;
+	ptt->n_bytes = t->n_bytes;
+	ptt->obj_id = t->obj_id;
+
+	if (t->chunk_id == 0 && t->extra_available) {
+		/* Store the extra header info instead */
+		/* We save the parent object in the chunk_id */
+		ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+		if (t->extra_is_shrink)
+			ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+		if (t->extra_shadows)
+			ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+		ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+		ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+		if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+			ptt->n_bytes = t->extra_equiv_id;
+		else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+			ptt->n_bytes = t->extra_length;
+		else
+			ptt->n_bytes = 0;
+	}
+
+	yaffs_dump_packed_tags2_tags_only(ptt);
+	yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+		      const struct yaffs_ext_tags *t, int tags_ecc)
+{
+	yaffs_pack_tags2_tags_only(&pt->t, t);
+
+	if (tags_ecc)
+		yaffs_ecc_calc_other((unsigned char *)&pt->t,
+				     sizeof(struct
+					    yaffs_packed_tags2_tags_only),
+				     &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+				  struct yaffs_packed_tags2_tags_only *ptt)
+{
+
+	memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+	yaffs_init_tags(t);
+
+	if (ptt->seq_number != 0xFFFFFFFF) {
+		t->block_bad = 0;
+		t->chunk_used = 1;
+		t->obj_id = ptt->obj_id;
+		t->chunk_id = ptt->chunk_id;
+		t->n_bytes = ptt->n_bytes;
+		t->is_deleted = 0;
+		t->serial_number = 0;
+		t->seq_number = ptt->seq_number;
+
+		/* Do extra header info stuff */
+
+		if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+			t->chunk_id = 0;
+			t->n_bytes = 0;
+
+			t->extra_available = 1;
+			t->extra_parent_id =
+			    ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+			t->extra_is_shrink =
+			    (ptt->chunk_id & EXTRA_SHRINK_FLAG) ? 1 : 0;
+			t->extra_shadows =
+			    (ptt->chunk_id & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+			t->extra_obj_type =
+			    ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+			t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+			if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+				t->extra_equiv_id = ptt->n_bytes;
+			else
+				t->extra_length = ptt->n_bytes;
+		}
+	}
+
+	yaffs_dump_packed_tags2_tags_only(ptt);
+	yaffs_dump_tags2(t);
+
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+			int tags_ecc)
+{
+
+	enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+	if (pt->t.seq_number != 0xFFFFFFFF && tags_ecc) {
+		/* Chunk is in use and we need to do ECC */
+
+		struct yaffs_ecc_other ecc;
+		int result;
+		yaffs_ecc_calc_other((unsigned char *)&pt->t,
+				     sizeof(struct
+					    yaffs_packed_tags2_tags_only),
+				     &ecc);
+		result =
+		    yaffs_ecc_correct_other((unsigned char *)&pt->t,
+					    sizeof(struct
+						   yaffs_packed_tags2_tags_only),
+					    &pt->ecc, &ecc);
+		switch (result) {
+		case 0:
+			ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+			break;
+		case 1:
+			ecc_result = YAFFS_ECC_RESULT_FIXED;
+			break;
+		case -1:
+			ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+			break;
+		default:
+			ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+		}
+	}
+
+	yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+	t->ecc_result = ecc_result;
+
+	yaffs_dump_packed_tags2(pt);
+	yaffs_dump_tags2(t);
+}
diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 0000000..f329669
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+	unsigned seq_number;
+	unsigned obj_id;
+	unsigned chunk_id;
+	unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+	struct yaffs_packed_tags2_tags_only t;
+	struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+		      const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+			int tags_ecc);
+
+/* Only the tags part (no ECC), for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+				const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+				  struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 0000000..7578075
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations  *********/
+
+void yaffs_calc_ecc(const u8 * data, struct yaffs_spare *spare)
+{
+	yaffs_ecc_cacl(data, spare->ecc1);
+	yaffs_ecc_cacl(&data[256], spare->ecc2);
+}
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+	/* Calculate an ecc */
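+	/* The ecc is the XOR of the (1-based) bit positions of every set bit
+	 * in the 8 tag bytes; a single-bit error therefore yields the
+	 * position of the flipped bit, which yaffs_check_tags_ecc() uses to
+	 * repair it.
+	 */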
+
+	unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+	unsigned i, j;
+	unsigned ecc = 0;
+	unsigned bit = 0;
+
+	tags->ecc = 0;
+
+	for (i = 0; i < 8; i++) {
+		for (j = 1; j & 0xff; j <<= 1) {
+			bit++;
+			if (b[i] & j)
+				ecc ^= bit;
+		}
+	}
+
+	tags->ecc = ecc;
+
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+	unsigned ecc = tags->ecc;
+
+	yaffs_calc_tags_ecc(tags);
+
+	ecc ^= tags->ecc;
+
+	if (ecc && ecc <= 64) {
+		/* TODO: Handle the failure better. Retire? */
+		unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+		ecc--;
+
+		b[ecc / 8] ^= (1 << (ecc & 7));
+
+		/* Now recalculate the ecc */
+		yaffs_calc_tags_ecc(tags);
+
+		return 1;	/* recovered error */
+	} else if (ecc) {
+		/* Weird ecc failure value */
+		/* TODO Need to do something here */
+		return -1;	/* unrecovered error */
+	}
+
+	return 0;
+}
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+				     struct yaffs_tags *tags_ptr)
+{
+	union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+	yaffs_calc_tags_ecc(tags_ptr);
+
+	spare_ptr->tb0 = tu->as_bytes[0];
+	spare_ptr->tb1 = tu->as_bytes[1];
+	spare_ptr->tb2 = tu->as_bytes[2];
+	spare_ptr->tb3 = tu->as_bytes[3];
+	spare_ptr->tb4 = tu->as_bytes[4];
+	spare_ptr->tb5 = tu->as_bytes[5];
+	spare_ptr->tb6 = tu->as_bytes[6];
+	spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+				      struct yaffs_spare *spare_ptr,
+				      struct yaffs_tags *tags_ptr)
+{
+	union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+	int result;
+
+	tu->as_bytes[0] = spare_ptr->tb0;
+	tu->as_bytes[1] = spare_ptr->tb1;
+	tu->as_bytes[2] = spare_ptr->tb2;
+	tu->as_bytes[3] = spare_ptr->tb3;
+	tu->as_bytes[4] = spare_ptr->tb4;
+	tu->as_bytes[5] = spare_ptr->tb5;
+	tu->as_bytes[6] = spare_ptr->tb6;
+	tu->as_bytes[7] = spare_ptr->tb7;
+
+	result = yaffs_check_tags_ecc(tags_ptr);
+	if (result > 0)
+		dev->n_tags_ecc_fixed++;
+	else if (result < 0)
+		dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+	memset(spare, 0xFF, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+			 int nand_chunk, const u8 * data,
+			 struct yaffs_spare *spare)
+{
+	if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>> yaffs chunk %d is not valid",
+			nand_chunk);
+		return YAFFS_FAIL;
+	}
+
+	return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+			       int nand_chunk,
+			       u8 * data,
+			       struct yaffs_spare *spare,
+			       enum yaffs_ecc_result *ecc_result,
+			       int correct_errors)
+{
+	int ret_val;
+	struct yaffs_spare local_spare;
+
+	if (!spare && data) {
+		/* If we don't have a real spare, then we use a local one. */
+		/* Need this for the calculation of the ecc */
+		spare = &local_spare;
+	}
+
+	if (!dev->param.use_nand_ecc) {
+		ret_val =
+		    dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
+		if (data && correct_errors) {
+			/* Do ECC correction */
+			/* Todo handle any errors */
+			int ecc_result1, ecc_result2;
+			u8 calc_ecc[3];
+
+			yaffs_ecc_cacl(data, calc_ecc);
+			ecc_result1 =
+			    yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+			yaffs_ecc_cacl(&data[256], calc_ecc);
+			ecc_result2 =
+			    yaffs_ecc_correct(&data[256], spare->ecc2,
+					      calc_ecc);
+
+			if (ecc_result1 > 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>yaffs ecc error fix performed on chunk %d:0",
+					nand_chunk);
+				dev->n_ecc_fixed++;
+			} else if (ecc_result1 < 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>yaffs ecc error unfixed on chunk %d:0",
+					nand_chunk);
+				dev->n_ecc_unfixed++;
+			}
+
+			if (ecc_result2 > 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>yaffs ecc error fix performed on chunk %d:1",
+					nand_chunk);
+				dev->n_ecc_fixed++;
+			} else if (ecc_result2 < 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>yaffs ecc error unfixed on chunk %d:1",
+					nand_chunk);
+				dev->n_ecc_unfixed++;
+			}
+
+			if (ecc_result1 || ecc_result2) {
+				/* We had a data problem on this page */
+				yaffs_handle_rd_data_error(dev, nand_chunk);
+			}
+
+			if (ecc_result1 < 0 || ecc_result2 < 0)
+				*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+			else if (ecc_result1 > 0 || ecc_result2 > 0)
+				*ecc_result = YAFFS_ECC_RESULT_FIXED;
+			else
+				*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+		}
+	} else {
+		/* Must allocate enough memory for spare+2*sizeof(int) */
+		/* for ecc results from device. */
+		struct yaffs_nand_spare nspare;
+
+		memset(&nspare, 0, sizeof(nspare));
+
+		ret_val = dev->param.read_chunk_fn(dev, nand_chunk, data,
+						   (struct yaffs_spare *)
+						   &nspare);
+		memcpy(spare, &nspare, sizeof(struct yaffs_spare));
+		if (data && correct_errors) {
+			if (nspare.eccres1 > 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>mtd ecc error fix performed on chunk %d:0",
+					nand_chunk);
+			} else if (nspare.eccres1 < 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>mtd ecc error unfixed on chunk %d:0",
+					nand_chunk);
+			}
+
+			if (nspare.eccres2 > 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>mtd ecc error fix performed on chunk %d:1",
+					nand_chunk);
+			} else if (nspare.eccres2 < 0) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"**>>mtd ecc error unfixed on chunk %d:1",
+					nand_chunk);
+			}
+
+			if (nspare.eccres1 || nspare.eccres2) {
+				/* We had a data problem on this page */
+				yaffs_handle_rd_data_error(dev, nand_chunk);
+			}
+
+			if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+				*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+			else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+				*ecc_result = YAFFS_ECC_RESULT_FIXED;
+			else
+				*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+		}
+	}
+	return ret_val;
+}
+
+/*
+ * Functions for improving robustness
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+	int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+	/* Mark the block for retirement */
+	yaffs_get_block_info(dev,
+			     flash_block + dev->block_offset)->needs_retiring =
+	    1;
+	yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+		"**>>Block %d marked for retirement",
+		flash_block);
+
+	/* TODO:
+	 * Just do a garbage collection on the affected block
+	 * then retire the block
+	 * NB recursion
+	 */
+}
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 const u8 * data, const struct yaffs_ext_tags *ext_tags)
+{
+	struct yaffs_spare spare;
+	struct yaffs_tags tags;
+
+	yaffs_spare_init(&spare);
+
+	if (ext_tags->is_deleted)
+		spare.page_status = 0;
+	else {
+		tags.obj_id = ext_tags->obj_id;
+		tags.chunk_id = ext_tags->chunk_id;
+
+		tags.n_bytes_lsb = ext_tags->n_bytes & 0x3ff;
+
+		if (dev->data_bytes_per_chunk >= 1024)
+			tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+		else
+			tags.n_bytes_msb = 3;
+
+		tags.serial_number = ext_tags->serial_number;
+
+		if (!dev->param.use_nand_ecc && data)
+			yaffs_calc_ecc(data, &spare);
+
+		yaffs_load_tags_to_spare(&spare, &tags);
+
+	}
+
+	return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
+
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 u8 * data, struct yaffs_ext_tags *ext_tags)
+{
+
+	struct yaffs_spare spare;
+	struct yaffs_tags tags;
+	enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+
+	static struct yaffs_spare spare_ff;
+	static int init;
+
+	if (!init) {
+		memset(&spare_ff, 0xFF, sizeof(spare_ff));
+		init = 1;
+	}
+
+	if (yaffs_rd_chunk_nand(dev, nand_chunk, data, &spare, &ecc_result, 1)) {
+		/* ext_tags may be NULL */
+		if (ext_tags) {
+
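+			/* page_status is written as 0 when a chunk is deleted
+			 * (see yaffs_tags_compat_wr); fewer than 7 set bits
+			 * here is treated as deleted, so a single flipped bit
+			 * does not change the result.
+			 */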
+			int deleted =
+			    (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+			ext_tags->is_deleted = deleted;
+			ext_tags->ecc_result = ecc_result;
+			ext_tags->block_bad = 0;	/* We're reading it */
+			/* therefore it is not a bad block */
+			ext_tags->chunk_used =
+			    (memcmp(&spare_ff, &spare, sizeof(spare_ff)) !=
+			     0) ? 1 : 0;
+
+			if (ext_tags->chunk_used) {
+				yaffs_get_tags_from_spare(dev, &spare, &tags);
+
+				ext_tags->obj_id = tags.obj_id;
+				ext_tags->chunk_id = tags.chunk_id;
+				ext_tags->n_bytes = tags.n_bytes_lsb;
+
+				if (dev->data_bytes_per_chunk >= 1024)
+					ext_tags->n_bytes |=
+					    (((unsigned)tags.
+					      n_bytes_msb) << 10);
+
+				ext_tags->serial_number = tags.serial_number;
+			}
+		}
+
+		return YAFFS_OK;
+	} else {
+		return YAFFS_FAIL;
+	}
+}
+
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+
+	struct yaffs_spare spare;
+
+	memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+	spare.block_status = 'Y';
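+	/* block_status is set to 'Y' (rather than the erased 0xFF) to mark
+	 * the block bad; yaffs_tags_compat_query_block() treats fewer than
+	 * 7 set bits in the combined status of the first two chunks as dead.
+	 */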
+
+	yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+		      &spare);
+	yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+		      NULL, &spare);
+
+	return YAFFS_OK;
+
+}
+
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+				  int block_no,
+				  enum yaffs_block_state *state,
+				  u32 * seq_number)
+{
+
+	struct yaffs_spare spare0, spare1;
+	static struct yaffs_spare spare_ff;
+	static int init;
+	enum yaffs_ecc_result dummy;
+
+	if (!init) {
+		memset(&spare_ff, 0xFF, sizeof(spare_ff));
+		init = 1;
+	}
+
+	*seq_number = 0;
+
+	yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
+			    &spare0, &dummy, 1);
+	yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+			    NULL, &spare1, &dummy, 1);
+
+	if (hweight8(spare0.block_status & spare1.block_status) < 7)
+		*state = YAFFS_BLOCK_STATE_DEAD;
+	else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+		*state = YAFFS_BLOCK_STATE_EMPTY;
+	else
+		*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 0000000..8cd35dc
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,36 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+#include "yaffs_guts.h"
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 const u8 * data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 u8 * data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+				  int block_no,
+				  enum yaffs_block_state *state,
+				  u32 * seq_number);
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+int yaffs_count_bits(u8 byte);
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagsvalidity.c b/fs/yaffs2/yaffs_tagsvalidity.c
new file mode 100644
index 0000000..4358d79
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.c
@@ -0,0 +1,27 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_tagsvalidity.h"
+
+void yaffs_init_tags(struct yaffs_ext_tags *tags)
+{
+	memset(tags, 0, sizeof(struct yaffs_ext_tags));
+	tags->validity0 = 0xAAAAAAAA;
+	tags->validity1 = 0x55555555;
+}
+
+int yaffs_validate_tags(struct yaffs_ext_tags *tags)
+{
+	return (tags->validity0 == 0xAAAAAAAA && tags->validity1 == 0x55555555);
+
+}
diff --git a/fs/yaffs2/yaffs_tagsvalidity.h b/fs/yaffs2/yaffs_tagsvalidity.h
new file mode 100644
index 0000000..36a021f
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGS_VALIDITY_H__
+#define __YAFFS_TAGS_VALIDITY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_tags(struct yaffs_ext_tags *tags);
+int yaffs_validate_tags(struct yaffs_ext_tags *tags);
+#endif
diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
new file mode 100644
index 0000000..6273dbf
--- /dev/null
+++ b/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS			0x00000002
+#define YAFFS_TRACE_ALLOCATE		0x00000004
+#define YAFFS_TRACE_SCAN		0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS		0x00000010
+#define YAFFS_TRACE_ERASE		0x00000020
+#define YAFFS_TRACE_GC			0x00000040
+#define YAFFS_TRACE_WRITE		0x00000080
+#define YAFFS_TRACE_TRACING		0x00000100
+#define YAFFS_TRACE_DELETION		0x00000200
+#define YAFFS_TRACE_BUFFERS		0x00000400
+#define YAFFS_TRACE_NANDACCESS		0x00000800
+#define YAFFS_TRACE_GC_DETAIL		0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG		0x00002000
+#define YAFFS_TRACE_MTD			0x00004000
+#define YAFFS_TRACE_CHECKPOINT		0x00008000
+
+#define YAFFS_TRACE_VERIFY		0x00010000
+#define YAFFS_TRACE_VERIFY_NAND		0x00020000
+#define YAFFS_TRACE_VERIFY_FULL		0x00040000
+#define YAFFS_TRACE_VERIFY_ALL		0x000F0000
+
+#define YAFFS_TRACE_SYNC		0x00100000
+#define YAFFS_TRACE_BACKGROUND		0x00200000
+#define YAFFS_TRACE_LOCK		0x00400000
+#define YAFFS_TRACE_MOUNT		0x00800000
+
+#define YAFFS_TRACE_ERROR		0x40000000
+#define YAFFS_TRACE_BUG			0x80000000
+#define YAFFS_TRACE_ALWAYS		0xF0000000
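+
+/* yaffs_trace_mask is a bitwise OR of the flags above; yaffs_vfs.c sets a
+ * default of YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS and exposes the
+ * mask as a writable module parameter.
+ */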
+
+#endif
diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
new file mode 100644
index 0000000..738c7f6
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,535 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+	dev = dev;
+	return !(yaffs_trace_mask &
+		 (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+	dev = dev;
+	return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+	dev = dev;
+	return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char *block_state_name[] = {
+	"Unknown",
+	"Needs scanning",
+	"Scanning",
+	"Empty",
+	"Allocating",
+	"Full",
+	"Dirty",
+	"Checkpoint",
+	"Collecting",
+	"Dead"
+};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+	int actually_used;
+	int in_use;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	/* Report illegal runtime states */
+	if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has undefined state %d",
+			n, bi->block_state);
+
+	switch (bi->block_state) {
+	case YAFFS_BLOCK_STATE_UNKNOWN:
+	case YAFFS_BLOCK_STATE_SCANNING:
+	case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has bad run-state %s",
+			n, block_state_name[bi->block_state]);
+	}
+
+	/* Check pages in use and soft deletions are legal */
+
+	actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+	if (bi->pages_in_use < 0
+	    || bi->pages_in_use > dev->param.chunks_per_block
+	    || bi->soft_del_pages < 0
+	    || bi->soft_del_pages > dev->param.chunks_per_block
+	    || actually_used < 0 || actually_used > dev->param.chunks_per_block)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has illegal values pages_in_used %d soft_del_pages %d",
+			n, bi->pages_in_use, bi->soft_del_pages);
+
+	/* Check chunk bitmap legal */
+	in_use = yaffs_count_chunk_bits(dev, n);
+	if (in_use != bi->pages_in_use)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+			n, bi->pages_in_use, in_use);
+
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+				struct yaffs_block_info *bi, int n)
+{
+	yaffs_verify_blk(dev, bi, n);
+
+	/* After collection the block should be in the erased state */
+
+	if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+	    bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Block %d is in state %d after gc, should be erased",
+			n, bi->block_state);
+	}
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+	int i;
+	int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+	int illegal_states = 0;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	memset(state_count, 0, sizeof(state_count));
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+		yaffs_verify_blk(dev, bi, i);
+
+		if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+			state_count[bi->block_state]++;
+		else
+			illegal_states++;
+	}
+
+	yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+	yaffs_trace(YAFFS_TRACE_VERIFY,
+		"%d blocks have illegal states",
+		illegal_states);
+	if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Too many allocating blocks");
+
+	for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"%s %d blocks",
+			block_state_name[i], state_count[i]);
+
+	if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Checkpoint block count wrong dev %d count %d",
+			dev->blocks_in_checkpt,
+			state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+	if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Erased block count wrong dev %d count %d",
+			dev->n_erased_blocks,
+			state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+	if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Too many collecting blocks %d (max is 1)",
+			state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL,
+ * in which case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+		     struct yaffs_ext_tags *tags, int parent_check)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+
+	if (!(tags && obj && oh)) {
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Verifying object header tags %p obj %p oh %p",
+			tags, obj, oh);
+		return;
+	}
+
+	if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+	    oh->type > YAFFS_OBJECT_TYPE_MAX)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header type is illegal value 0x%x",
+			tags->obj_id, oh->type);
+
+	if (tags->obj_id != obj->obj_id)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header mismatch obj_id %d",
+			tags->obj_id, obj->obj_id);
+
+	/*
+	 * Check that the object's parent ids match if parent_check requested.
+	 *
+	 * Tests do not apply to the root object.
+	 */
+
+	if (parent_check && tags->obj_id > 1 && !obj->parent)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header mismatch parent_id %d obj->parent is NULL",
+			tags->obj_id, oh->parent_obj_id);
+
+	if (parent_check && obj->parent &&
+	    oh->parent_obj_id != obj->parent->obj_id &&
+	    (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+	     obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header mismatch parent_id %d parent_obj_id %d",
+			tags->obj_id, oh->parent_obj_id,
+			obj->parent->obj_id);
+
+	if (tags->obj_id > 1 && oh->name[0] == 0)	/* Null name */
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header name is NULL",
+			obj->obj_id);
+
+	if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff)	/* Trashed name */
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header name is 0xFF",
+			obj->obj_id);
+}
+
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+	int required_depth;
+	int actual_depth;
+	u32 last_chunk;
+	u32 x;
+	u32 i;
+	struct yaffs_dev *dev;
+	struct yaffs_ext_tags tags;
+	struct yaffs_tnode *tn;
+	u32 obj_id;
+
+	if (!obj)
+		return;
+
+	if (yaffs_skip_verification(obj->my_dev))
+		return;
+
+	dev = obj->my_dev;
+	obj_id = obj->obj_id;
+
+	/* Check file size is consistent with tnode depth */
+	last_chunk =
+	    obj->variant.file_variant.file_size / dev->data_bytes_per_chunk + 1;
+	x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+	required_depth = 0;
+	while (x > 0) {
+		x >>= YAFFS_TNODES_INTERNAL_BITS;
+		required_depth++;
+	}
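+	/* required_depth is the number of internal tnode levels needed above
+	 * level 0 to cover last_chunk; actual_depth below is what the object
+	 * currently records as its top level.
+	 */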
+
+	actual_depth = obj->variant.file_variant.top_level;
+
+	/* Check that the chunks in the tnode tree are all correct.
+	 * We do this by scanning through the tnode tree and
+	 * checking the tags for every chunk match.
+	 */
+
+	if (yaffs_skip_nand_verification(dev))
+		return;
+
+	for (i = 1; i <= last_chunk; i++) {
+		tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+		if (tn) {
+			u32 the_chunk = yaffs_get_group_base(dev, tn, i);
+			if (the_chunk > 0) {
+				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+							 &tags);
+				if (tags.obj_id != obj_id || tags.chunk_id != i)
+					yaffs_trace(YAFFS_TRACE_VERIFY,
+						"Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+						obj_id, i, the_chunk,
+						tags.obj_id, tags.chunk_id);
+			}
+		}
+	}
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+
+	/* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+
+	/* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+
+	u32 chunk_min;
+	u32 chunk_max;
+
+	u32 chunk_id_ok;
+	u32 chunk_in_range;
+	u32 chunk_wrongly_deleted;
+	u32 chunk_valid;
+
+	if (!obj)
+		return;
+
+	if (obj->being_created)
+		return;
+
+	dev = obj->my_dev;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	/* Check sane object header chunk */
+
+	chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+	chunk_max =
+	    (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+	chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+			  ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+	chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+	chunk_valid = chunk_in_range &&
+	    yaffs_check_chunk_bit(dev,
+				  obj->hdr_chunk / dev->param.chunks_per_block,
+				  obj->hdr_chunk % dev->param.chunks_per_block);
+	chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+	if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has chunk_id %d %s %s",
+			obj->obj_id, obj->hdr_chunk,
+			chunk_id_ok ? "" : ",out of range",
+			chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+	if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+		struct yaffs_ext_tags tags;
+		struct yaffs_obj_hdr *oh;
+		u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+		oh = (struct yaffs_obj_hdr *)buffer;
+
+		yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+		yaffs_verify_oh(obj, oh, &tags, 1);
+
+		yaffs_release_temp_buffer(dev, buffer, __LINE__);
+	}
+
+	/* Verify it has a parent */
+	if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has parent pointer %p which does not look like an object",
+			obj->obj_id, obj->parent);
+	}
+
+	/* Verify parent is a directory */
+	if (obj->parent
+	    && obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d's parent is not a directory (type %d)",
+			obj->obj_id, obj->parent->variant_type);
+	}
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		yaffs_verify_file(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		yaffs_verify_symlink(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		yaffs_verify_dir(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		yaffs_verify_link(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		yaffs_verify_special(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+	default:
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has illegaltype %d",
+		   obj->obj_id, obj->variant_type);
+		break;
+	}
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	int i;
+	struct list_head *lh;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	/* Iterate through the objects in each hash entry */
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		list_for_each(lh, &dev->obj_bucket[i].list) {
+			if (lh) {
+				obj =
+				    list_entry(lh, struct yaffs_obj, hash_link);
+				yaffs_verify_obj(obj);
+			}
+		}
+	}
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+	struct list_head *lh;
+	struct yaffs_obj *list_obj;
+
+	int count = 0;
+
+	if (!obj) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+		YBUG();
+		return;
+	}
+
+	if (yaffs_skip_verification(obj->my_dev))
+		return;
+
+	if (!obj->parent) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent" );
+		YBUG();
+		return;
+	}
+
+	if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+		YBUG();
+	}
+
+	/* Iterate through the objects in each hash entry */
+
+	list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+		if (lh) {
+			list_obj = list_entry(lh, struct yaffs_obj, siblings);
+			yaffs_verify_obj(list_obj);
+			if (obj == list_obj)
+				count++;
+		}
+	}
+
+	if (count != 1) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"Object in directory %d times",
+			count);
+		YBUG();
+	}
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+	struct list_head *lh;
+	struct yaffs_obj *list_obj;
+
+	if (!directory) {
+		YBUG();
+		return;
+	}
+
+	if (yaffs_skip_full_verification(directory->my_dev))
+		return;
+
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"Directory has wrong type: %d",
+			directory->variant_type);
+		YBUG();
+	}
+
+	/* Iterate through the objects in each hash entry */
+
+	list_for_each(lh, &directory->variant.dir_variant.children) {
+		if (lh) {
+			list_obj = list_entry(lh, struct yaffs_obj, siblings);
+			if (list_obj->parent != directory) {
+				yaffs_trace(YAFFS_TRACE_ALWAYS,
+					"Object in directory list has wrong parent %p",
+					list_obj->parent);
+				YBUG();
+			}
+			yaffs_verify_obj_in_dir(list_obj);
+		}
+	}
+}
+
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+	int counted;
+	int difference;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	counted = yaffs_count_free_chunks(dev);
+
+	difference = dev->n_free_chunks - counted;
+
+	if (difference) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"Free chunks verification failure %d %d %d",
+			dev->n_free_chunks, counted, difference);
+		yaffs_free_verification_failures++;
+	}
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+	in = in;
+	return YAFFS_OK;
+}
+
diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
new file mode 100644
index 0000000..cc6f889
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+		      int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+				struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+		     struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
new file mode 100644
index 0000000..d5b8753
--- /dev/null
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -0,0 +1,2792 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ *         this superblock
+ * >> 2.6: sb->s_fs_info  points to the struct yaffs_dev associated with this
+ *         superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
+
+/*
+ * NB There are two variants of Linux VFS glue code. This variant supports
+ * a single version and should not include any multi-version code.
+ */
+#include <linux/version.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include <linux/exportfs.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+
+#include <asm/div64.h>
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
+
+#define yaffs_devname(sb, buf)	bdevname(sb->s_bdev, buf)
+
+#define YPROC_ROOT  NULL
+
+#define Y_INIT_TIMER(a)	init_timer_on_stack(a)
+
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+	uint64_t result = partition_size;
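+	/* do_div() divides the 64-bit value in place (leaving the quotient in
+	 * result) and returns the remainder, avoiding a 64-bit division
+	 * helper call on 32-bit platforms.
+	 */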
+	do_div(result, block_size);
+	return (uint32_t) result;
+}
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+#include "yaffs_mtdif1.h"
+#include "yaffs_mtdif2.h"
+
+unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+
+/* Module Parameters */
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+
+
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#define yaffs_inode_to_obj(iptr) ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+#define yaffs_super_to_dev(sb)	((struct yaffs_dev *)sb->s_fs_info)
+
+#define update_dir_time(dir) do {\
+			(dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+		} while(0)
+
+
+static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
+{
+	return yaffs_gc_control;
+}
+
+static void yaffs_gross_lock(struct yaffs_dev *dev)
+{
+	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
+	mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
+	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
+}
+
+static void yaffs_gross_unlock(struct yaffs_dev *dev)
+{
+	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
+	mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
+}
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+				      struct yaffs_obj *obj);
+
+static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+{
+	struct inode *inode;
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
+
+	inode = iget_locked(sb, ino);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	/* NB This is called as a side effect of other functions, but
+	 * we had to release the lock to prevent deadlocks, so we
+	 * need to lock it again.
+	 */
+
+	yaffs_gross_lock(dev);
+
+	obj = yaffs_find_by_number(dev, inode->i_ino);
+
+	yaffs_fill_inode_from_obj(inode, obj);
+
+	yaffs_gross_unlock(dev);
+
+	unlock_new_inode(inode);
+	return inode;
+}
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+			      struct yaffs_obj *obj)
+{
+	struct inode *inode;
+
+	if (!sb) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_get_inode for NULL super_block!!");
+		return NULL;
+
+	}
+
+	if (!obj) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_get_inode for NULL object!!");
+		return NULL;
+
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_get_inode for object %d",
+		obj->obj_id);
+
+	inode = yaffs_iget(sb, obj->obj_id);
+	if (IS_ERR(inode))
+		return NULL;
+
+	/* NB Side effect: iget calls back to yaffs_read_inode(). */
+	/* iget also increments the inode's i_count */
+	/* NB You can't be holding gross_lock or deadlock will happen! */
+
+	return inode;
+}
+
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+		       dev_t rdev)
+{
+	struct inode *inode;
+
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_dev *dev;
+
+	struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+	int error = -ENOSPC;
+	uid_t uid = current->cred->fsuid;
+	gid_t gid =
+	    (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;
+
+	if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+		mode |= S_ISGID;
+
+	if (parent) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod: parent object %d type %d",
+			parent->obj_id, parent->variant_type);
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod: could not get parent object");
+		return -EPERM;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_mknod: making oject for %s, mode %x dev %x",
+		dentry->d_name.name, mode, rdev);
+
+	dev = parent->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	switch (mode & S_IFMT) {
+	default:
+		/* Special (socket, fifo, device...) */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+		obj =
+		    yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+					 gid, old_encode_dev(rdev));
+		break;
+	case S_IFREG:		/* file          */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+		obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+					gid);
+		break;
+	case S_IFDIR:		/* directory */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+		obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+				       uid, gid);
+		break;
+	case S_IFLNK:		/* symlink */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+		obj = NULL;	/* Do we ever get here? */
+		break;
+	}
+
+	/* Can not call yaffs_get_inode() with gross lock held */
+	yaffs_gross_unlock(dev);
+
+	if (obj) {
+		inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+		d_instantiate(dentry, inode);
+		update_dir_time(dir);
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod created object %d count = %d",
+			obj->obj_id, atomic_read(&inode->i_count));
+		error = 0;
+		yaffs_fill_inode_from_obj(dir, parent);
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+		error = -ENOMEM;
+	}
+
+	return error;
+}
+
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	return yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+}
+
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+			struct nameidata *n)
+{
+	return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+		      struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_obj *link = NULL;
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
+
+	obj = yaffs_inode_to_obj(inode);
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	if (!S_ISDIR(inode->i_mode))	/* Don't link directories */
+		link =
+		    yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
+				   obj);
+
+	if (link) {
+		old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
+		d_instantiate(dentry, old_dentry->d_inode);
+		atomic_inc(&old_dentry->d_inode->i_count);
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_link link count %d i_count %d",
+			old_dentry->d_inode->i_nlink,
+			atomic_read(&old_dentry->d_inode->i_count));
+	}
+
+	yaffs_gross_unlock(dev);
+
+	if (link) {
+		update_dir_time(dir);
+		return 0;
+	}
+
+	return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+			 const char *symname)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+	uid_t uid = current->cred->fsuid;
+	gid_t gid =
+	    (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
+
+	dev = yaffs_inode_to_obj(dir)->my_dev;
+	yaffs_gross_lock(dev);
+	obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
+				   S_IFLNK | S_IRWXUGO, uid, gid, symname);
+	yaffs_gross_unlock(dev);
+
+	if (obj) {
+		struct inode *inode;
+
+		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+		d_instantiate(dentry, inode);
+		update_dir_time(dir);
+		yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
+		return 0;
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
+	}
+
+	return -ENOMEM;
+}
+
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+				   struct nameidata *n)
+{
+	struct yaffs_obj *obj;
+	struct inode *inode = NULL;
+
+	struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
+
+	if (current != yaffs_dev_to_lc(dev)->readdir_process)
+		yaffs_gross_lock(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_lookup for %d:%s",
+		yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
+
+	obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
+
+	obj = yaffs_get_equivalent_obj(obj);	/* in case it was a hardlink */
+
+	/* Can't hold gross lock when calling yaffs_get_inode() */
+	if (current != yaffs_dev_to_lc(dev)->readdir_process)
+		yaffs_gross_unlock(dev);
+
+	if (obj) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_lookup found %d", obj->obj_id);
+
+		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+
+		if (inode) {
+			yaffs_trace(YAFFS_TRACE_OS, "yaffs_loookup dentry");
+			d_add(dentry, inode);
+			/* return dentry; */
+			return NULL;
+		}
+
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
+
+	}
+
+	d_add(dentry, inode);
+
+	return NULL;
+}
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int ret_val;
+
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_unlink %d:%s",
+		(int)(dir->i_ino), dentry->d_name.name);
+	obj = yaffs_inode_to_obj(dir);
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	ret_val = yaffs_unlinker(obj, dentry->d_name.name);
+
+	if (ret_val == YAFFS_OK) {
+		dentry->d_inode->i_nlink--;
+		dir->i_version++;
+		yaffs_gross_unlock(dev);
+		mark_inode_dirty(dentry->d_inode);
+		update_dir_time(dir);
+		return 0;
+	}
+	yaffs_gross_unlock(dev);
+	return -ENOTEMPTY;
+}
+
+static int yaffs_sync_object(struct file *file, int datasync)
+{
+
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+	struct dentry *dentry = file->f_path.dentry;
+
+	obj = yaffs_dentry_to_obj(dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, "yaffs_sync_object");
+	yaffs_gross_lock(dev);
+	yaffs_flush_file(obj, 1, datasync);
+	yaffs_gross_unlock(dev);
+	return 0;
+}
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct yaffs_dev *dev;
+	int ret_val = YAFFS_FAIL;
+	struct yaffs_obj *target;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
+	dev = yaffs_inode_to_obj(old_dir)->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	/* Check if the target is an existing directory that is not empty. */
+	target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
+				    new_dentry->d_name.name);
+
+	if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
+	    !list_empty(&target->variant.dir_variant.children)) {
+
+		yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
+
+		ret_val = YAFFS_FAIL;
+	} else {
+		/* Now does unlinking internally using shadowing mechanism */
+		yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
+
+		ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
+					   old_dentry->d_name.name,
+					   yaffs_inode_to_obj(new_dir),
+					   new_dentry->d_name.name);
+	}
+	yaffs_gross_unlock(dev);
+
+	if (ret_val == YAFFS_OK) {
+		if (target) {
+			new_dentry->d_inode->i_nlink--;
+			mark_inode_dirty(new_dentry->d_inode);
+		}
+
+		update_dir_time(old_dir);
+		if (old_dir != new_dir)
+			update_dir_time(new_dir);
+		return 0;
+	} else {
+		return -ENOTEMPTY;
+	}
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_setattr of object %d",
+		yaffs_inode_to_obj(inode)->obj_id);
+
+	/* Fail if a requested resize >= 2GB */
+	if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
+		error = -EINVAL;
+
+	if (error == 0)
+		error = inode_change_ok(inode, attr);
+	if (error == 0) {
+		int result;
+		if (!error) {
+			setattr_copy(inode, attr);
+			yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
+			if (attr->ia_valid & ATTR_SIZE) {
+				truncate_setsize(inode, attr->ia_size);
+				inode->i_blocks = (inode->i_size + 511) >> 9;
+			}
+		}
+		dev = yaffs_inode_to_obj(inode)->my_dev;
+		if (attr->ia_valid & ATTR_SIZE) {
+			yaffs_trace(YAFFS_TRACE_OS, "resize to %d(%x)",
+					   (int)(attr->ia_size),
+					   (int)(attr->ia_size));
+		}
+		yaffs_gross_lock(dev);
+		result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
+		if (result == YAFFS_OK) {
+			error = 0;
+		} else {
+			error = -EPERM;
+		}
+		yaffs_gross_unlock(dev);
+
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
+
+	return error;
+}
+
+#ifdef CONFIG_YAFFS_XATTR
+static int yaffs_setxattr(struct dentry *dentry, const char *name,
+		   const void *value, size_t size, int flags)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
+
+	if (error == 0) {
+		int result;
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		result = yaffs_set_xattrib(obj, name, value, size, flags);
+		if (result == YAFFS_OK)
+			error = 0;
+		else if (result < 0)
+			error = result;
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
+
+	return error;
+}
+
+static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name, void *buff,
+		       size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_getxattr \"%s\" from object %d",
+		name, obj->obj_id);
+
+	if (error == 0) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		error = yaffs_get_xattrib(obj, name, buff, size);
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
+
+	return error;
+}
+
+static int yaffs_removexattr(struct dentry *dentry, const char *name)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_removexattr of object %d", obj->obj_id);
+
+	if (error == 0) {
+		int result;
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		result = yaffs_remove_xattrib(obj, name);
+		if (result == YAFFS_OK)
+			error = 0;
+		else if (result < 0)
+			error = result;
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_removexattr done returning %d", error);
+
+	return error;
+}
+
+static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_listxattr of object %d", obj->obj_id);
+
+	if (error == 0) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		error = yaffs_list_xattrib(obj, buff, size);
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_listxattr done returning %d", error);
+
+	return error;
+}
+
+#endif
+
+static const struct inode_operations yaffs_dir_inode_operations = {
+	.create = yaffs_create,
+	.lookup = yaffs_lookup,
+	.link = yaffs_link,
+	.unlink = yaffs_unlink,
+	.symlink = yaffs_symlink,
+	.mkdir = yaffs_mkdir,
+	.rmdir = yaffs_unlink,
+	.mknod = yaffs_mknod,
+	.rename = yaffs_rename,
+	.setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+	.setxattr = yaffs_setxattr,
+	.getxattr = yaffs_getxattr,
+	.listxattr = yaffs_listxattr,
+	.removexattr = yaffs_removexattr,
+#endif
+};
+/*-----------------------------------------------------------------*/
+/* Directory search context allows us to unlock access to yaffs during
+ * filldir without causing problems with the directory being modified.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus search contexts) may be alive simultaneously so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A search context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
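+
+/* Typical lifecycle: a search context is created at the start of a readdir
+ * (yaffs_new_search()), advanced as the directory is iterated
+ * (yaffs_search_advance()), and disposed of with yaffs_search_end().
+ * yaffs_remove_obj_callback() advances any context that is currently
+ * sitting on an object being removed.
+ */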
+
+struct yaffs_search_context {
+	struct yaffs_dev *dev;
+	struct yaffs_obj *dir_obj;
+	struct yaffs_obj *next_return;
+	struct list_head others;
+};
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+	struct yaffs_dev *dev = dir->my_dev;
+	struct yaffs_search_context *sc =
+	    kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+	if (sc) {
+		sc->dir_obj = dir;
+		sc->dev = dev;
+		if (list_empty(&sc->dir_obj->variant.dir_variant.children))
+			sc->next_return = NULL;
+		else
+			sc->next_return =
+			    list_entry(dir->variant.dir_variant.children.next,
+				       struct yaffs_obj, siblings);
+		INIT_LIST_HEAD(&sc->others);
+		list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+	}
+	return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+	if (sc) {
+		list_del(&sc->others);
+		kfree(sc);
+	}
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{
+	if (!sc)
+		return;
+
+	if (sc->next_return == NULL ||
+	    list_empty(&sc->dir_obj->variant.dir_variant.children))
+		sc->next_return = NULL;
+	else {
+		struct list_head *next = sc->next_return->siblings.next;
+
+		if (next == &sc->dir_obj->variant.dir_variant.children)
+			sc->next_return = NULL;	/* end of list */
+		else
+			sc->next_return =
+			    list_entry(next, struct yaffs_obj, siblings);
+	}
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+
+	struct list_head *i;
+	struct yaffs_search_context *sc;
+	struct list_head *search_contexts =
+	    &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+	/* Iterate through the directory search contexts.
+	 * If any are currently on the object being removed, then advance
+	 * the search context to the next object to prevent a hanging pointer.
+	 */
+	list_for_each(i, search_contexts) {
+		if (i) {
+			sc = list_entry(i, struct yaffs_search_context, others);
+			if (sc->next_return == obj)
+				yaffs_search_advance(sc);
+		}
+	}
+
+}
+
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+	struct yaffs_search_context *sc;
+	struct inode *inode = f->f_dentry->d_inode;
+	unsigned long offset, curoffs;
+	struct yaffs_obj *l;
+	int ret_val = 0;
+
+	char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	yaffs_dev_to_lc(dev)->readdir_process = current;
+
+	offset = f->f_pos;
+
+	sc = yaffs_new_search(obj);
+	if (!sc) {
+		ret_val = -ENOMEM;
+		goto out;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_readdir: starting at %d", (int)offset);
+
+	if (offset == 0) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_readdir: entry . ino %d",
+			(int)inode->i_ino);
+		yaffs_gross_unlock(dev);
+		if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+			yaffs_gross_lock(dev);
+			goto out;
+		}
+		yaffs_gross_lock(dev);
+		offset++;
+		f->f_pos++;
+	}
+	if (offset == 1) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_readdir: entry .. ino %d",
+			(int)f->f_dentry->d_parent->d_inode->i_ino);
+		yaffs_gross_unlock(dev);
+		if (filldir(dirent, "..", 2, offset,
+			    f->f_dentry->d_parent->d_inode->i_ino,
+			    DT_DIR) < 0) {
+			yaffs_gross_lock(dev);
+			goto out;
+		}
+		yaffs_gross_lock(dev);
+		offset++;
+		f->f_pos++;
+	}
+
+	curoffs = 1;
+
+	/* If the directory has changed since the open or last call to
+	   readdir, rewind to after the 2 canned entries. */
+	if (f->f_version != inode->i_version) {
+		offset = 2;
+		f->f_pos = offset;
+		f->f_version = inode->i_version;
+	}
+
+	while (sc->next_return) {
+		curoffs++;
+		l = sc->next_return;
+		if (curoffs >= offset) {
+			int this_inode = yaffs_get_obj_inode(l);
+			int this_type = yaffs_get_obj_type(l);
+
+			yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+			yaffs_trace(YAFFS_TRACE_OS,
+				"yaffs_readdir: %s inode %d",
+				name, yaffs_get_obj_inode(l));
+
+			yaffs_gross_unlock(dev);
+
+			if (filldir(dirent,
+				    name,
+				    strlen(name),
+				    offset, this_inode, this_type) < 0) {
+				yaffs_gross_lock(dev);
+				goto out;
+			}
+
+			yaffs_gross_lock(dev);
+
+			offset++;
+			f->f_pos++;
+		}
+		yaffs_search_advance(sc);
+	}
+
+out:
+	yaffs_search_end(sc);
+	yaffs_dev_to_lc(dev)->readdir_process = NULL;
+	yaffs_gross_unlock(dev);
+
+	return ret_val;
+}
+
+static const struct file_operations yaffs_dir_operations = {
+	.read = generic_read_dir,
+	.readdir = yaffs_readdir,
+	.fsync = yaffs_sync_object,
+	.llseek = generic_file_llseek,
+};
+
+
+
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+{
+	struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+
+	struct yaffs_dev *dev = obj->my_dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+	  	"yaffs_file_flush object %d (%s)",
+		obj->obj_id, obj->dirty ? "dirty" : "clean");
+
+	yaffs_gross_lock(dev);
+
+	yaffs_flush_file(obj, 1, 0);
+
+	yaffs_gross_unlock(dev);
+
+	return 0;
+}
+
+static const struct file_operations yaffs_file_operations = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.aio_read = generic_file_aio_read,
+	.aio_write = generic_file_aio_write,
+	.mmap = generic_file_mmap,
+	.flush = yaffs_file_flush,
+	.fsync = yaffs_sync_object,
+	.splice_read = generic_file_splice_read,
+	.splice_write = generic_file_splice_write,
+	.llseek = generic_file_llseek,
+};
+
+
+/* ExportFS support */
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+					  uint32_t generation)
+{
+	return yaffs_iget(sb, ino);
+}
+
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+					  struct fid *fid, int fh_len,
+					  int fh_type)
+{
+	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+				    yaffs2_nfs_get_inode);
+}
+
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+					  struct fid *fid, int fh_len,
+					  int fh_type)
+{
+	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+				    yaffs2_nfs_get_inode);
+}
+
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+
+	struct super_block *sb = dentry->d_inode->i_sb;
+	struct dentry *parent = ERR_PTR(-ENOENT);
+	struct inode *inode;
+	unsigned long parent_ino;
+	struct yaffs_obj *d_obj;
+	struct yaffs_obj *parent_obj;
+
+	d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+	if (d_obj) {
+		parent_obj = d_obj->parent;
+		if (parent_obj) {
+			parent_ino = yaffs_get_obj_inode(parent_obj);
+			inode = yaffs_iget(sb, parent_ino);
+
+			if (IS_ERR(inode)) {
+				parent = ERR_CAST(inode);
+			} else {
+				/* d_obtain_alias() returns a dentry or an
+				 * ERR_PTR and consumes the inode reference
+				 * in either case, so no further error
+				 * handling or iput() is needed.
+				 */
+				parent = d_obtain_alias(inode);
+			}
+		}
+	}
+
+	return parent;
+}
+
+/* Only the overridden functions are declared here; members left NULL
+ * fall back to the default exportfs implementations.
+ */
+
+static struct export_operations yaffs_export_ops = {
+	.fh_to_dentry = yaffs2_fh_to_dentry,
+	.fh_to_parent = yaffs2_fh_to_parent,
+	.get_parent = yaffs2_get_parent,
+};
+
+
+/*-----------------------------------------------------------------*/
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+			  int buflen)
+{
+	unsigned char *alias;
+	int ret;
+
+	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+
+	yaffs_gross_unlock(dev);
+
+	if (!alias)
+		return -ENOMEM;
+
+	ret = vfs_readlink(dentry, buffer, buflen, alias);
+	kfree(alias);
+	return ret;
+}
+
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	unsigned char *alias;
+	void *ret;
+	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+	yaffs_gross_unlock(dev);
+
+	if (!alias) {
+		ret = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	nd_set_link(nd, alias);
+	ret = (void *)alias;
+out:
+	return ret;
+}
+
+void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
+{
+	kfree(alias);
+}
+
+
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+	/* Clear the association between the inode and
+	 * the struct yaffs_obj.
+	 */
+	obj->my_inode = NULL;
+	yaffs_inode_to_obj_lv(inode) = NULL;
+
+	/* If the object freeing was deferred, then the real
+	 * free happens now.
+	 * This should fix the inode inconsistency problem.
+	 */
+	yaffs_handle_defered_free(obj);
+}
+
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+	int deleteme = 0;
+
+	obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_evict_inode: ino %d, count %d %s",
+		(int)inode->i_ino,
+		atomic_read(&inode->i_count),
+		obj ? "object exists" : "null object");
+
+	if (!inode->i_nlink && !is_bad_inode(inode))
+		deleteme = 1;
+	truncate_inode_pages(&inode->i_data, 0);
+	end_writeback(inode);
+
+	if (deleteme && obj) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		yaffs_del_obj(obj);
+		yaffs_gross_unlock(dev);
+	}
+	if (obj) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		yaffs_unstitch_obj(inode, obj);
+		yaffs_gross_unlock(dev);
+	}
+
+}
+
+static void yaffs_touch_super(struct yaffs_dev *dev)
+{
+	struct super_block *sb = yaffs_dev_to_lc(dev)->super;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_touch_super() sb = %p", sb);
+	if (sb)
+		sb->s_dirt = 1;
+}
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+	/* Lifted from jffs2 */
+
+	struct yaffs_obj *obj;
+	unsigned char *pg_buf;
+	int ret;
+
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_readpage_nolock at %08x, size %08x",
+		(unsigned)(pg->index << PAGE_CACHE_SHIFT),
+		(unsigned)PAGE_CACHE_SIZE);
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	dev = obj->my_dev;
+
+	BUG_ON(!PageLocked(pg));
+
+	pg_buf = kmap(pg);
+	/* kmap() cannot fail on a valid page, though it may sleep. */
+
+	yaffs_gross_lock(dev);
+
+	ret = yaffs_file_rd(obj, pg_buf,
+			    pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+
+	yaffs_gross_unlock(dev);
+
+	if (ret >= 0)
+		ret = 0;
+
+	if (ret) {
+		ClearPageUptodate(pg);
+		SetPageError(pg);
+	} else {
+		SetPageUptodate(pg);
+		ClearPageError(pg);
+	}
+
+	flush_dcache_page(pg);
+	kunmap(pg);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+	return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+	int ret = yaffs_readpage_nolock(f, pg);
+	UnlockPage(pg);
+	return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+	int ret;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+	ret = yaffs_readpage_unlock(f, pg);
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+	return ret;
+}
+
+/* writepage inspired by/stolen from smbfs */
+
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct yaffs_dev *dev;
+	struct address_space *mapping = page->mapping;
+	struct inode *inode;
+	unsigned long end_index;
+	char *buffer;
+	struct yaffs_obj *obj;
+	int n_written = 0;
+	unsigned n_bytes;
+	loff_t i_size;
+
+	if (!mapping)
+		BUG();
+	inode = mapping->host;
+	if (!inode)
+		BUG();
+	i_size = i_size_read(inode);
+
+	end_index = i_size >> PAGE_CACHE_SHIFT;
+
+	if (page->index < end_index)
+		n_bytes = PAGE_CACHE_SIZE;
+	else {
+		n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
+
+		if (page->index > end_index || !n_bytes) {
+			yaffs_trace(YAFFS_TRACE_OS,
+				"yaffs_writepage at %08x, inode size = %08x!!!",
+				(unsigned)(page->index << PAGE_CACHE_SHIFT),
+				(unsigned)inode->i_size);
+			yaffs_trace(YAFFS_TRACE_OS,
+			  "                -> don't care!!");
+
+			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+			set_page_writeback(page);
+			unlock_page(page);
+			end_page_writeback(page);
+			return 0;
+		}
+	}
+
+	if (n_bytes != PAGE_CACHE_SIZE)
+		zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
+
+	get_page(page);
+
+	buffer = kmap(page);
+
+	obj = yaffs_inode_to_obj(inode);
+	dev = obj->my_dev;
+	yaffs_gross_lock(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_writepage at %08x, size %08x",
+		(unsigned)(page->index << PAGE_CACHE_SHIFT), n_bytes);
+	yaffs_trace(YAFFS_TRACE_OS,
+		"writepag0: obj = %05x, ino = %05x",
+		(int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+	n_written = yaffs_wr_file(obj, buffer,
+				  page->index << PAGE_CACHE_SHIFT, n_bytes, 0);
+
+	yaffs_touch_super(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"writepag1: obj = %05x, ino = %05x",
+		(int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+	yaffs_gross_unlock(dev);
+
+	kunmap(page);
+	set_page_writeback(page);
+	unlock_page(page);
+	end_page_writeback(page);
+	put_page(page);
+
+	return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/* Space holding and freeing is done to ensure we have space available for
+ * write_begin/end.
+ * For now we just assume few parallel writes and check against a small
+ * number.
+ * TODO: need to do this with a counter to handle parallel writes better.
+ */
+
+static ssize_t yaffs_hold_space(struct file *f)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+
+	int n_free_chunks;
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	n_free_chunks = yaffs_get_n_free_chunks(dev);
+
+	yaffs_gross_unlock(dev);
+
+	return (n_free_chunks > 20) ? 1 : 0;
+}
+
+static void yaffs_release_space(struct file *f)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	yaffs_gross_unlock(dev);
+}
+
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+			     loff_t pos, unsigned len, unsigned flags,
+			     struct page **pagep, void **fsdata)
+{
+	struct page *pg = NULL;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+	int ret = 0;
+	int space_held = 0;
+
+	/* Get a page */
+	pg = grab_cache_page_write_begin(mapping, index, flags);
+
+	*pagep = pg;
+	if (!pg) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	yaffs_trace(YAFFS_TRACE_OS,
+		"start yaffs_write_begin index %d(%x) uptodate %d",
+		(int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+	/* Get fs space */
+	space_held = yaffs_hold_space(filp);
+
+	if (!space_held) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	/* Update page if required */
+
+	if (!Page_Uptodate(pg))
+		ret = yaffs_readpage_nolock(filp, pg);
+
+	if (ret)
+		goto out;
+
+	/* Happy path return */
+	yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+	return 0;
+
+out:
+	yaffs_trace(YAFFS_TRACE_OS,
+		"end yaffs_write_begin fail returning %d", ret);
+	if (space_held)
+		yaffs_release_space(filp);
+	if (pg) {
+		unlock_page(pg);
+		page_cache_release(pg);
+	}
+	return ret;
+}
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+				loff_t * pos)
+{
+	struct yaffs_obj *obj;
+	int n_written, ipos;
+	struct inode *inode;
+	struct yaffs_dev *dev;
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	inode = f->f_dentry->d_inode;
+
+	if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+		ipos = inode->i_size;
+	else
+		ipos = *pos;
+
+	if (!obj)
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_file_write: hey obj is null!");
+	else
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_file_write about to write writing %u(%x) bytes to object %d at %d(%x)",
+			(unsigned)n, (unsigned)n, obj->obj_id, ipos, ipos);
+
+	n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+	yaffs_touch_super(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_file_write: %d(%x) bytes written",
+		(unsigned)n, (unsigned)n);
+
+	if (n_written > 0) {
+		ipos += n_written;
+		*pos = ipos;
+		if (ipos > inode->i_size) {
+			inode->i_size = ipos;
+			inode->i_blocks = (ipos + 511) >> 9;
+
+			yaffs_trace(YAFFS_TRACE_OS,
+				"yaffs_file_write size updated to %d bytes, %d blocks",
+				ipos, (int)(inode->i_blocks));
+		}
+
+	}
+	yaffs_gross_unlock(dev);
+	return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+			   loff_t pos, unsigned len, unsigned copied,
+			   struct page *pg, void *fsdadata)
+{
+	int ret = 0;
+	void *addr, *kva;
+	uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+
+	kva = kmap(pg);
+	addr = kva + offset_into_page;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_write_end addr %p pos %x n_bytes %d",
+		addr, (unsigned)pos, copied);
+
+	ret = yaffs_file_write(filp, addr, copied, &pos);
+
+	if (ret != copied) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_write_end not same size ret %d  copied %d",
+			ret, copied);
+		SetPageError(pg);
+	}
+
+	kunmap(pg);
+
+	yaffs_release_space(filp);
+	unlock_page(pg);
+	page_cache_release(pg);
+	return ret;
+}
+
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+	struct super_block *sb = dentry->d_sb;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+	yaffs_gross_lock(dev);
+
+	buf->f_type = YAFFS_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_namelen = 255;
+
+	if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+		/* Do this if chunk size is not a power of 2 */
+
+		uint64_t bytes_in_dev;
+		uint64_t bytes_free;
+
+		bytes_in_dev =
+		    ((uint64_t)
+		     ((dev->param.end_block - dev->param.start_block +
+		       1))) * ((uint64_t) (dev->param.chunks_per_block *
+					   dev->data_bytes_per_chunk));
+
+		do_div(bytes_in_dev, sb->s_blocksize);	/* bytes_in_dev becomes the number of blocks */
+		buf->f_blocks = bytes_in_dev;
+
+		bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+		    ((uint64_t) (dev->data_bytes_per_chunk));
+
+		do_div(bytes_free, sb->s_blocksize);
+
+		buf->f_bfree = bytes_free;
+
+	} else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+		buf->f_blocks =
+		    (dev->param.end_block - dev->param.start_block + 1) *
+		    dev->param.chunks_per_block /
+		    (sb->s_blocksize / dev->data_bytes_per_chunk);
+		buf->f_bfree =
+		    yaffs_get_n_free_chunks(dev) /
+		    (sb->s_blocksize / dev->data_bytes_per_chunk);
+	} else {
+		buf->f_blocks =
+		    (dev->param.end_block - dev->param.start_block + 1) *
+		    dev->param.chunks_per_block *
+		    (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+		buf->f_bfree =
+		    yaffs_get_n_free_chunks(dev) *
+		    (dev->data_bytes_per_chunk / sb->s_blocksize);
+	}
+
+	buf->f_files = 0;
+	buf->f_ffree = 0;
+	buf->f_bavail = buf->f_bfree;
+
+	yaffs_gross_unlock(dev);
+	return 0;
+}
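+
+/*
+ * Illustrative statfs arithmetic (example numbers only): with 2048-byte
+ * chunks, 64 chunks per block, blocks 0..1023 and a 4096-byte
+ * sb->s_blocksize, the chunk size is a power of two and smaller than the
+ * blocksize, so the middle branch applies and
+ * f_blocks = 1024 * 64 / (4096 / 2048) = 32768.
+ */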
+
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+	struct inode *iptr;
+	struct yaffs_obj *obj;
+
+	list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+		obj = yaffs_inode_to_obj(iptr);
+		if (obj) {
+			yaffs_trace(YAFFS_TRACE_OS,
+				"flushing obj %d", obj->obj_id);
+			yaffs_flush_file(obj, 1, 0);
+		}
+	}
+}
+
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+	if (!dev)
+		return;
+
+	yaffs_flush_inodes(sb);
+	yaffs_update_dirty_dirs(dev);
+	yaffs_flush_whole_cache(dev);
+	if (do_checkpoint)
+		yaffs_checkpoint_save(dev);
+}
+
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{
+	unsigned erased_chunks =
+	    dev->n_erased_blocks * dev->param.chunks_per_block;
+	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+	unsigned scattered = 0;	/* Free chunks not in an erased block */
+
+	if (erased_chunks < dev->n_free_chunks)
+		scattered = (dev->n_free_chunks - erased_chunks);
+
+	if (!context->bg_running)
+		return 0;
+	else if (scattered < (dev->param.chunks_per_block * 2))
+		return 0;
+	else if (erased_chunks > dev->n_free_chunks / 2)
+		return 0;
+	else if (erased_chunks > dev->n_free_chunks / 4)
+		return 1;
+	else
+		return 2;
+}
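+
+/*
+ * Worked example of the urgency thresholds above, with illustrative
+ * numbers and chunks_per_block = 64: given 1000 free chunks of which 400
+ * sit in erased blocks, 600 chunks are scattered; 600 >= 128, 400 is not
+ * more than 500 but is more than 250, so the urgency is 1.  If only 200
+ * chunks were in erased blocks the urgency would rise to 2.
+ */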
+
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+	unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+	unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+	int do_checkpoint;
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+		"yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+		gc_urgent,
+		sb->s_dirt ? "dirty" : "clean",
+		request_checkpoint ? "checkpoint requested" : "no checkpoint",
+		oneshot_checkpoint ? " one-shot" : "");
+
+	yaffs_gross_lock(dev);
+	do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+			 oneshot_checkpoint) && !dev->is_checkpointed;
+
+	if (sb->s_dirt || do_checkpoint) {
+		yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+		sb->s_dirt = 0;
+		if (oneshot_checkpoint)
+			yaffs_auto_checkpoint &= ~4;
+	}
+	yaffs_gross_unlock(dev);
+
+	return 0;
+}
+
+/*
+ * yaffs background thread functions.
+ * yaffs_bg_thread_fn() is the thread function.
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after yaffs is initialised.
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is read-only.
+ */
+
+void yaffs_background_waker(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static int yaffs_bg_thread_fn(void *data)
+{
+	struct yaffs_dev *dev = (struct yaffs_dev *)data;
+	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+	unsigned long now = jiffies;
+	unsigned long next_dir_update = now;
+	unsigned long next_gc = now;
+	unsigned long expires;
+	unsigned int urgency;
+
+	int gc_result;
+	struct timer_list timer;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND,
+		"yaffs_background starting for dev %p", (void *)dev);
+
+	set_freezable();
+	while (context->bg_running) {
+		yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+		if (kthread_should_stop())
+			break;
+
+		if (try_to_freeze())
+			continue;
+
+		yaffs_gross_lock(dev);
+
+		now = jiffies;
+
+		if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+			yaffs_update_dirty_dirs(dev);
+			next_dir_update = now + HZ;
+		}
+
+		if (time_after(now, next_gc) && yaffs_bg_enable) {
+			if (!dev->is_checkpointed) {
+				urgency = yaffs_bg_gc_urgency(dev);
+				gc_result = yaffs_bg_gc(dev, urgency);
+				if (urgency > 1)
+					next_gc = now + HZ / 20 + 1;
+				else if (urgency > 0)
+					next_gc = now + HZ / 10 + 1;
+				else
+					next_gc = now + HZ * 2;
+			} else {
+				/*
+				 * gc not running so set to next_dir_update
+				 * to cut down on wake ups
+				 */
+				next_gc = next_dir_update;
+			}
+		}
+		yaffs_gross_unlock(dev);
+		expires = next_dir_update;
+		if (time_before(next_gc, expires))
+			expires = next_gc;
+		if (time_before(expires, now))
+			expires = now + HZ;
+
+		Y_INIT_TIMER(&timer);
+		timer.expires = expires + 1;
+		timer.data = (unsigned long)current;
+		timer.function = yaffs_background_waker;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_timer(&timer);
+		schedule();
+		del_timer_sync(&timer);
+	}
+
+	return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+	int retval = 0;
+	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+
+	if (dev->read_only)
+		return -1;
+
+	context->bg_running = 1;
+
+	context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+					 (void *)dev, "yaffs-bg-%d",
+					 context->mount_id);
+
+	if (IS_ERR(context->bg_thread)) {
+		retval = PTR_ERR(context->bg_thread);
+		context->bg_thread = NULL;
+		context->bg_running = 0;
+	}
+	return retval;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+	struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+	ctxt->bg_running = 0;
+
+	if (ctxt->bg_thread) {
+		kthread_stop(ctxt->bg_thread);
+		ctxt->bg_thread = NULL;
+	}
+}
+
+static void yaffs_write_super(struct super_block *sb)
+{
+	unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+		"yaffs_write_super%s",
+		request_checkpoint ? " checkpt" : "");
+
+	yaffs_do_sync_fs(sb, request_checkpoint);
+
+}
+
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+{
+	unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+		"yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
+
+	yaffs_do_sync_fs(sb, request_checkpoint);
+
+	return 0;
+}
+
+
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+
+
+struct yaffs_options {
+	int inband_tags;
+	int skip_checkpoint_read;
+	int skip_checkpoint_write;
+	int no_cache;
+	int tags_ecc_on;
+	int tags_ecc_overridden;
+	int lazy_loading_enabled;
+	int lazy_loading_overridden;
+	int empty_lost_and_found;
+	int empty_lost_and_found_overridden;
+};
+
+#define MAX_OPT_LEN 30
+static int yaffs_parse_options(struct yaffs_options *options,
+			       const char *options_str)
+{
+	char cur_opt[MAX_OPT_LEN + 1];
+	int p;
+	int error = 0;
+
+	/* Parse through the options, which form a comma-separated list */
+
+	while (options_str && *options_str && !error) {
+		memset(cur_opt, 0, MAX_OPT_LEN + 1);
+		p = 0;
+
+		while (*options_str == ',')
+			options_str++;
+
+		while (*options_str && *options_str != ',') {
+			if (p < MAX_OPT_LEN) {
+				cur_opt[p] = *options_str;
+				p++;
+			}
+			options_str++;
+		}
+
+		if (!strcmp(cur_opt, "inband-tags")) {
+			options->inband_tags = 1;
+		} else if (!strcmp(cur_opt, "tags-ecc-off")) {
+			options->tags_ecc_on = 0;
+			options->tags_ecc_overridden = 1;
+		} else if (!strcmp(cur_opt, "tags-ecc-on")) {
+			options->tags_ecc_on = 1;
+			options->tags_ecc_overridden = 1;
+		} else if (!strcmp(cur_opt, "lazy-loading-off")) {
+			options->lazy_loading_enabled = 0;
+			options->lazy_loading_overridden = 1;
+		} else if (!strcmp(cur_opt, "lazy-loading-on")) {
+			options->lazy_loading_enabled = 1;
+			options->lazy_loading_overridden = 1;
+		} else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+			options->empty_lost_and_found = 0;
+			options->empty_lost_and_found_overridden = 1;
+		} else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+			options->empty_lost_and_found = 1;
+			options->empty_lost_and_found_overridden = 1;
+		} else if (!strcmp(cur_opt, "no-cache")) {
+			options->no_cache = 1;
+		} else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+			options->skip_checkpoint_read = 1;
+		} else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+			options->skip_checkpoint_write = 1;
+		} else if (!strcmp(cur_opt, "no-checkpoint")) {
+			options->skip_checkpoint_read = 1;
+			options->skip_checkpoint_write = 1;
+		} else {
+			printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+			       cur_opt);
+			error = 1;
+		}
+	}
+
+	return error;
+}
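+
+/*
+ * Illustrative mount invocations exercising the options parsed above
+ * (device and mount point names are examples only):
+ *
+ *   mount -t yaffs2 /dev/mtdblock3 /mnt/flash
+ *   mount -t yaffs2 -o inband-tags,no-checkpoint /dev/mtdblock3 /mnt/flash
+ *   mount -t yaffs -o tags-ecc-off /dev/mtdblock2 /mnt/flash
+ *
+ * An unrecognised option makes yaffs_parse_options() return an error and
+ * the mount fails with a "Bad mount option" message.
+ */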
+
+static struct address_space_operations yaffs_file_address_operations = {
+	.readpage = yaffs_readpage,
+	.writepage = yaffs_writepage,
+	.write_begin = yaffs_write_begin,
+	.write_end = yaffs_write_end,
+};
+
+
+
+static const struct inode_operations yaffs_file_inode_operations = {
+	.setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+	.setxattr = yaffs_setxattr,
+	.getxattr = yaffs_getxattr,
+	.listxattr = yaffs_listxattr,
+	.removexattr = yaffs_removexattr,
+#endif
+};
+
+static const struct inode_operations yaffs_symlink_inode_operations = {
+	.readlink = yaffs_readlink,
+	.follow_link = yaffs_follow_link,
+	.put_link = yaffs_put_link,
+	.setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+	.setxattr = yaffs_setxattr,
+	.getxattr = yaffs_getxattr,
+	.listxattr = yaffs_listxattr,
+	.removexattr = yaffs_removexattr,
+#endif
+};
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+				      struct yaffs_obj *obj)
+{
+	if (inode && obj) {
+
+		/* Check mode against the variant type and attempt to repair if broken. */
+		u32 mode = obj->yst_mode;
+		switch (obj->variant_type) {
+		case YAFFS_OBJECT_TYPE_FILE:
+			if (!S_ISREG(mode)) {
+				obj->yst_mode &= ~S_IFMT;
+				obj->yst_mode |= S_IFREG;
+			}
+
+			break;
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+			if (!S_ISLNK(mode)) {
+				obj->yst_mode &= ~S_IFMT;
+				obj->yst_mode |= S_IFLNK;
+			}
+
+			break;
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+			if (!S_ISDIR(mode)) {
+				obj->yst_mode &= ~S_IFMT;
+				obj->yst_mode |= S_IFDIR;
+			}
+
+			break;
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+		default:
+			/* TODO? */
+			break;
+		}
+
+		inode->i_flags |= S_NOATIME;
+
+		inode->i_ino = obj->obj_id;
+		inode->i_mode = obj->yst_mode;
+		inode->i_uid = obj->yst_uid;
+		inode->i_gid = obj->yst_gid;
+
+		inode->i_rdev = old_decode_dev(obj->yst_rdev);
+
+		inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+		inode->i_atime.tv_nsec = 0;
+		inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+		inode->i_mtime.tv_nsec = 0;
+		inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+		inode->i_ctime.tv_nsec = 0;
+		inode->i_size = yaffs_get_obj_length(obj);
+		inode->i_blocks = (inode->i_size + 511) >> 9;
+
+		inode->i_nlink = yaffs_get_obj_link_count(obj);
+
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_fill_inode mode %x uid %d gid %d size %d count %d",
+			inode->i_mode, inode->i_uid, inode->i_gid,
+			(int)inode->i_size, atomic_read(&inode->i_count));
+
+		switch (obj->yst_mode & S_IFMT) {
+		default:	/* fifo, device or socket */
+			init_special_inode(inode, obj->yst_mode,
+					   old_decode_dev(obj->yst_rdev));
+			break;
+		case S_IFREG:	/* file */
+			inode->i_op = &yaffs_file_inode_operations;
+			inode->i_fop = &yaffs_file_operations;
+			inode->i_mapping->a_ops =
+			    &yaffs_file_address_operations;
+			break;
+		case S_IFDIR:	/* directory */
+			inode->i_op = &yaffs_dir_inode_operations;
+			inode->i_fop = &yaffs_dir_operations;
+			break;
+		case S_IFLNK:	/* symlink */
+			inode->i_op = &yaffs_symlink_inode_operations;
+			break;
+		}
+
+		yaffs_inode_to_obj_lv(inode) = obj;
+
+		obj->my_inode = inode;
+
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_fill_inode invalid parameters");
+	}
+}
+
+static void yaffs_put_super(struct super_block *sb)
+{
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_put_super");
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+		"Shutting down yaffs background thread");
+	yaffs_bg_stop(dev);
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+		"yaffs background thread shut down");
+
+	yaffs_gross_lock(dev);
+
+	yaffs_flush_super(sb, 1);
+
+	if (yaffs_dev_to_lc(dev)->put_super_fn)
+		yaffs_dev_to_lc(dev)->put_super_fn(sb);
+
+	yaffs_deinitialise(dev);
+
+	yaffs_gross_unlock(dev);
+	mutex_lock(&yaffs_context_lock);
+	list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+	mutex_unlock(&yaffs_context_lock);
+
+	if (yaffs_dev_to_lc(dev)->spare_buffer) {
+		kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+		yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+	}
+
+	kfree(dev);
+}
+
+static void yaffs_mtd_put_super(struct super_block *sb)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(yaffs_super_to_dev(sb));
+
+	if (mtd->sync)
+		mtd->sync(mtd);
+
+	put_mtd_device(mtd);
+}
+
+static const struct super_operations yaffs_super_ops = {
+	.statfs = yaffs_statfs,
+	.put_super = yaffs_put_super,
+	.evict_inode = yaffs_evict_inode,
+	.sync_fs = yaffs_sync_fs,
+	.write_super = yaffs_write_super,
+};
+
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+						     struct super_block *sb,
+						     void *data, int silent)
+{
+	int n_blocks;
+	struct inode *inode = NULL;
+	struct dentry *root;
+	struct yaffs_dev *dev = 0;
+	char devname_buf[BDEVNAME_SIZE + 1];
+	struct mtd_info *mtd;
+	int err;
+	char *data_str = (char *)data;
+	struct yaffs_linux_context *context = NULL;
+	struct yaffs_param *param;
+
+	int read_only = 0;
+
+	struct yaffs_options options;
+
+	unsigned mount_id;
+	int found;
+	struct yaffs_linux_context *context_iterator;
+	struct list_head *l;
+
+	sb->s_magic = YAFFS_MAGIC;
+	sb->s_op = &yaffs_super_ops;
+	sb->s_flags |= MS_NOATIME;
+
+	read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+	sb->s_export_op = &yaffs_export_ops;
+
+	if (!sb)
+		printk(KERN_INFO "yaffs: sb is NULL\n");
+	else if (!sb->s_dev)
+		printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+	else if (!yaffs_devname(sb, devname_buf))
+		printk(KERN_INFO "yaffs: devname is NULL\n");
+	else
+		printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+		       sb->s_dev,
+		       yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+	if (!data_str)
+		data_str = "";
+
+	printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+	memset(&options, 0, sizeof(options));
+
+	if (yaffs_parse_options(&options, data_str)) {
+		/* Option parsing failed */
+		return NULL;
+	}
+
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: Using yaffs%d", yaffs_version);
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"Attempting MTD mount of %u.%u,\"%s\"",
+		MAJOR(sb->s_dev), MINOR(sb->s_dev),
+		yaffs_devname(sb, devname_buf));
+
+	/* Check it's an mtd device..... */
+	if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+		return NULL;	/* This isn't an mtd device */
+
+	/* Get the device */
+	mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+	if (!mtd) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"MTD device #%u doesn't appear to exist",
+			MINOR(sb->s_dev));
+		return NULL;
+	}
+	/* Check it's NAND */
+	if (mtd->type != MTD_NANDFLASH) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"MTD device is not NAND it's type %d",
+			mtd->type);
+		return NULL;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS, " erase %p", mtd->erase);
+	yaffs_trace(YAFFS_TRACE_OS, " read %p", mtd->read);
+	yaffs_trace(YAFFS_TRACE_OS, " write %p", mtd->write);
+	yaffs_trace(YAFFS_TRACE_OS, " readoob %p", mtd->read_oob);
+	yaffs_trace(YAFFS_TRACE_OS, " writeoob %p", mtd->write_oob);
+	yaffs_trace(YAFFS_TRACE_OS, " block_isbad %p", mtd->block_isbad);
+	yaffs_trace(YAFFS_TRACE_OS, " block_markbad %p", mtd->block_markbad);
+	yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+	yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+	yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+	yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+
+#ifdef CONFIG_YAFFS_AUTO_YAFFS2
+
+	if (yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+		yaffs_version = 2;
+	}
+
+	/* Added NCB 26/5/2006 for completeness */
+	if (yaffs_version == 2 && !options.inband_tags
+	    && WRITE_SIZE(mtd) == 512) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+		yaffs_version = 1;
+	}
+#endif
+
+	if (yaffs_version == 2) {
+		/* Check for version 2 style functions */
+		if (!mtd->erase ||
+		    !mtd->block_isbad ||
+		    !mtd->block_markbad ||
+		    !mtd->read ||
+		    !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not support required functions");
+			return NULL;
+		}
+
+		if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+		     mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+		    !options.inband_tags) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not have the right page sizes");
+			return NULL;
+		}
+	} else {
+		/* Check for V1 style functions */
+		if (!mtd->erase ||
+		    !mtd->read ||
+		    !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not support required functions");
+			return NULL;
+		}
+
+		if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+		    mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not support have the right page sizes");
+			return NULL;
+		}
+	}
+
+	/* OK, so if we got here, we have an MTD that's NAND and looks
+	 * like it has the right capabilities
+	 * Set the struct yaffs_dev up for mtd
+	 */
+
+	if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+		read_only = 1;
+		printk(KERN_INFO
+		       "yaffs: mtd is read only, setting superblock read only");
+		sb->s_flags |= MS_RDONLY;
+	}
+
+	dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+	context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+	if (!dev || !context) {
+		if (dev)
+			kfree(dev);
+		if (context)
+			kfree(context);
+		dev = NULL;
+		context = NULL;
+	}
+
+	if (!dev) {
+		/* Could not allocate the device structure */
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs_read_super failed trying to allocate yaffs_dev");
+		return NULL;
+	}
+	memset(dev, 0, sizeof(struct yaffs_dev));
+	param = &(dev->param);
+
+	memset(context, 0, sizeof(struct yaffs_linux_context));
+	dev->os_context = context;
+	INIT_LIST_HEAD(&(context->context_list));
+	context->dev = dev;
+	context->super = sb;
+
+	dev->read_only = read_only;
+
+	sb->s_fs_info = dev;
+
+	dev->driver_context = mtd;
+	param->name = mtd->name;
+
+	/* Set up the memory size parameters.... */
+
+	n_blocks =
+	    YCALCBLOCKS(mtd->size,
+			(YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
+
+	param->start_block = 0;
+	param->end_block = n_blocks - 1;
+	param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+	param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+	param->n_reserved_blocks = 5;
+	param->n_caches = (options.no_cache) ? 0 : 10;
+	param->inband_tags = options.inband_tags;
+
+#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
+	param->disable_lazy_load = 1;
+#endif
+#ifdef CONFIG_YAFFS_XATTR
+	param->enable_xattr = 1;
+#endif
+	if (options.lazy_loading_overridden)
+		param->disable_lazy_load = !options.lazy_loading_enabled;
+
+#ifdef CONFIG_YAFFS_DISABLE_TAGS_ECC
+	param->no_tags_ecc = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_BACKGROUND
+#else
+	param->defered_dir_update = 1;
+#endif
+
+	if (options.tags_ecc_overridden)
+		param->no_tags_ecc = !options.tags_ecc_on;
+
+#ifdef CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
+	param->empty_lost_n_found = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING
+	param->refresh_period = 0;
+#else
+	param->refresh_period = 500;
+#endif
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+	param->always_check_erased = 1;
+#endif
+
+	if (options.empty_lost_and_found_overridden)
+		param->empty_lost_n_found = options.empty_lost_and_found;
+
+	/* ... and the functions. */
+	if (yaffs_version == 2) {
+		param->write_chunk_tags_fn = nandmtd2_write_chunk_tags;
+		param->read_chunk_tags_fn = nandmtd2_read_chunk_tags;
+		param->bad_block_fn = nandmtd2_mark_block_bad;
+		param->query_block_fn = nandmtd2_query_block;
+		yaffs_dev_to_lc(dev)->spare_buffer =
+			kmalloc(mtd->oobsize, GFP_NOFS);
+		param->is_yaffs2 = 1;
+		param->total_bytes_per_chunk = mtd->writesize;
+		param->chunks_per_block = mtd->erasesize / mtd->writesize;
+		n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+		param->start_block = 0;
+		param->end_block = n_blocks - 1;
+	} else {
+		/* use the MTD interface in yaffs_mtdif1.c */
+		param->write_chunk_tags_fn = nandmtd1_write_chunk_tags;
+		param->read_chunk_tags_fn = nandmtd1_read_chunk_tags;
+		param->bad_block_fn = nandmtd1_mark_block_bad;
+		param->query_block_fn = nandmtd1_query_block;
+		param->is_yaffs2 = 0;
+	}
+	/* ... and common functions */
+	param->erase_fn = nandmtd_erase_block;
+	param->initialise_flash_fn = nandmtd_initialise;
+
+	yaffs_dev_to_lc(dev)->put_super_fn = yaffs_mtd_put_super;
+
+	param->sb_dirty_fn = yaffs_touch_super;
+	param->gc_control = yaffs_gc_control_callback;
+
+	yaffs_dev_to_lc(dev)->super = sb;
+
+#ifndef CONFIG_YAFFS_DOES_ECC
+	param->use_nand_ecc = 1;
+#endif
+
+	param->skip_checkpt_rd = options.skip_checkpoint_read;
+	param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+	mutex_lock(&yaffs_context_lock);
+	/* Get a mount id */
+	found = 0;
+	for (mount_id = 0; !found; mount_id++) {
+		found = 1;
+		list_for_each(l, &yaffs_context_list) {
+			context_iterator =
+			    list_entry(l, struct yaffs_linux_context,
+				       context_list);
+			if (context_iterator->mount_id == mount_id)
+				found = 0;
+		}
+	}
+	context->mount_id = mount_id;
+
+	list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+		      &yaffs_context_list);
+	mutex_unlock(&yaffs_context_lock);
+
+	/* Directory search handling... */
+	INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+	param->remove_obj_fn = yaffs_remove_obj_callback;
+
+	mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+	yaffs_gross_lock(dev);
+
+	err = yaffs_guts_initialise(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: guts initialised %s",
+		(err == YAFFS_OK) ? "OK" : "FAILED");
+
+	if (err == YAFFS_OK)
+		yaffs_bg_start(dev);
+
+	if (!context->bg_thread)
+		param->defered_dir_update = 0;
+
+	/* Release lock before yaffs_get_inode() */
+	yaffs_gross_unlock(dev);
+
+	/* Create root inode */
+	if (err == YAFFS_OK)
+		inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+	if (!inode)
+		return NULL;
+
+	inode->i_op = &yaffs_dir_inode_operations;
+	inode->i_fop = &yaffs_dir_operations;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
+
+	root = d_alloc_root(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: d_alloc_root done");
+
+	if (!root) {
+		iput(inode);
+		return NULL;
+	}
+	sb->s_root = root;
+	sb->s_dirt = !dev->is_checkpointed;
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs_read_super: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
+	return sb;
+}
+
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+					 int silent)
+{
+	return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+static int yaffs_read_super(struct file_system_type *fs,
+			    int flags, const char *dev_name,
+			    void *data, struct vfsmount *mnt)
+{
+
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs_internal_read_super_mtd, mnt);
+}
+
+static struct file_system_type yaffs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "yaffs",
+	.get_sb = yaffs_read_super,
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV,
+};
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+					  int silent)
+{
+	return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+static int yaffs2_read_super(struct file_system_type *fs,
+			     int flags, const char *dev_name, void *data,
+			     struct vfsmount *mnt)
+{
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs2_internal_read_super_mtd, mnt);
+}
+
+static struct file_system_type yaffs2_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "yaffs2",
+	.get_sb = yaffs2_read_super,
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV,
+};
+#endif /* CONFIG_YAFFS_YAFFS2 */
+
+static struct proc_dir_entry *my_proc_entry;
+
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+	struct yaffs_param *param = &dev->param;
+	buf += sprintf(buf, "start_block........... %d\n", param->start_block);
+	buf += sprintf(buf, "end_block............. %d\n", param->end_block);
+	buf += sprintf(buf, "total_bytes_per_chunk. %d\n",
+			param->total_bytes_per_chunk);
+	buf += sprintf(buf, "use_nand_ecc.......... %d\n",
+			param->use_nand_ecc);
+	buf += sprintf(buf, "no_tags_ecc........... %d\n", param->no_tags_ecc);
+	buf += sprintf(buf, "is_yaffs2............. %d\n", param->is_yaffs2);
+	buf += sprintf(buf, "inband_tags........... %d\n", param->inband_tags);
+	buf += sprintf(buf, "empty_lost_n_found.... %d\n",
+			param->empty_lost_n_found);
+	buf += sprintf(buf, "disable_lazy_load..... %d\n",
+			param->disable_lazy_load);
+	buf += sprintf(buf, "refresh_period........ %d\n",
+			param->refresh_period);
+	buf += sprintf(buf, "n_caches.............. %d\n", param->n_caches);
+	buf += sprintf(buf, "n_reserved_blocks..... %d\n",
+			param->n_reserved_blocks);
+	buf += sprintf(buf, "always_check_erased... %d\n",
+			param->always_check_erased);
+
+	return buf;
+}
+
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+	buf +=
+	    sprintf(buf, "data_bytes_per_chunk.. %d\n",
+		    dev->data_bytes_per_chunk);
+	buf += sprintf(buf, "chunk_grp_bits........ %d\n", dev->chunk_grp_bits);
+	buf += sprintf(buf, "chunk_grp_size........ %d\n", dev->chunk_grp_size);
+	buf +=
+	    sprintf(buf, "n_erased_blocks....... %d\n", dev->n_erased_blocks);
+	buf +=
+	    sprintf(buf, "blocks_in_checkpt..... %d\n", dev->blocks_in_checkpt);
+	buf += sprintf(buf, "\n");
+	buf += sprintf(buf, "n_tnodes.............. %d\n", dev->n_tnodes);
+	buf += sprintf(buf, "n_obj................. %d\n", dev->n_obj);
+	buf += sprintf(buf, "n_free_chunks......... %d\n", dev->n_free_chunks);
+	buf += sprintf(buf, "\n");
+	buf += sprintf(buf, "n_page_writes......... %u\n", dev->n_page_writes);
+	buf += sprintf(buf, "n_page_reads.......... %u\n", dev->n_page_reads);
+	buf += sprintf(buf, "n_erasures............ %u\n", dev->n_erasures);
+	buf += sprintf(buf, "n_gc_copies........... %u\n", dev->n_gc_copies);
+	buf += sprintf(buf, "all_gcs............... %u\n", dev->all_gcs);
+	buf +=
+	    sprintf(buf, "passive_gc_count...... %u\n", dev->passive_gc_count);
+	buf +=
+	    sprintf(buf, "oldest_dirty_gc_count. %u\n",
+		    dev->oldest_dirty_gc_count);
+	buf += sprintf(buf, "n_gc_blocks........... %u\n", dev->n_gc_blocks);
+	buf += sprintf(buf, "bg_gcs................ %u\n", dev->bg_gcs);
+	buf +=
+	    sprintf(buf, "n_retired_writes...... %u\n", dev->n_retired_writes);
+	buf +=
+	    sprintf(buf, "n_retired_blocks...... %u\n", dev->n_retired_blocks);
+	buf += sprintf(buf, "n_ecc_fixed........... %u\n", dev->n_ecc_fixed);
+	buf += sprintf(buf, "n_ecc_unfixed......... %u\n", dev->n_ecc_unfixed);
+	buf +=
+	    sprintf(buf, "n_tags_ecc_fixed...... %u\n", dev->n_tags_ecc_fixed);
+	buf +=
+	    sprintf(buf, "n_tags_ecc_unfixed.... %u\n",
+		    dev->n_tags_ecc_unfixed);
+	buf += sprintf(buf, "cache_hits............ %u\n", dev->cache_hits);
+	buf +=
+	    sprintf(buf, "n_deleted_files....... %u\n", dev->n_deleted_files);
+	buf +=
+	    sprintf(buf, "n_unlinked_files...... %u\n", dev->n_unlinked_files);
+	buf += sprintf(buf, "refresh_count......... %u\n", dev->refresh_count);
+	buf += sprintf(buf, "n_bg_deletions........ %u\n", dev->n_bg_deletions);
+
+	return buf;
+}
+
+static int yaffs_proc_read(char *page,
+			   char **start,
+			   off_t offset, int count, int *eof, void *data)
+{
+	struct list_head *item;
+	char *buf = page;
+	int step = offset;
+	int n = 0;
+
+	/* Get proc_file_read() to step 'offset' by one on each successive call.
+	 * We use 'offset' (*ppos) to indicate where we are in dev_list.
+	 * This also assumes the user has posted a read buffer large
+	 * enough to hold the complete output; but that's life in /proc.
+	 */
+
+	*(int *)start = 1;
+
+	/* Print header first */
+	if (step == 0)
+		buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__ "\n");
+	else if (step == 1)
+		buf += sprintf(buf, "\n");
+	else {
+		step -= 2;
+
+		mutex_lock(&yaffs_context_lock);
+
+		/* Locate and print the Nth entry.  Order N-squared but N is small. */
+		list_for_each(item, &yaffs_context_list) {
+			struct yaffs_linux_context *dc =
+			    list_entry(item, struct yaffs_linux_context,
+				       context_list);
+			struct yaffs_dev *dev = dc->dev;
+
+			if (n < (step & ~1)) {
+				n += 2;
+				continue;
+			}
+			if ((step & 1) == 0) {
+				buf +=
+				    sprintf(buf, "\nDevice %d \"%s\"\n", n,
+					    dev->param.name);
+				buf = yaffs_dump_dev_part0(buf, dev);
+			} else {
+				buf = yaffs_dump_dev_part1(buf, dev);
+			}
+
+			break;
+		}
+		mutex_unlock(&yaffs_context_lock);
+	}
+
+	return buf - page < count ? buf - page : count;
+}
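+
+/*
+ * Illustrative usage: reading the entry (e.g. "cat /proc/yaffs", assuming
+ * the entry is created under the default proc root as in init_yaffs_fs()
+ * below) prints the build banner and then, for each mounted device, the
+ * parameter dump from yaffs_dump_dev_part0() followed by the statistics
+ * dump from yaffs_dump_dev_part1(), one step per read call as described
+ * above.
+ */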
+
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
+
+static struct {
+	char *mask_name;
+	unsigned mask_bitfield;
+} mask_flags[] = {
+	{"allocate", YAFFS_TRACE_ALLOCATE}, 
+	{"always", YAFFS_TRACE_ALWAYS},
+	{"background", YAFFS_TRACE_BACKGROUND},
+	{"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+	{"buffers", YAFFS_TRACE_BUFFERS},
+	{"bug", YAFFS_TRACE_BUG},
+	{"checkpt", YAFFS_TRACE_CHECKPOINT},
+	{"deletion", YAFFS_TRACE_DELETION},
+	{"erase", YAFFS_TRACE_ERASE},
+	{"error", YAFFS_TRACE_ERROR},
+	{"gc_detail", YAFFS_TRACE_GC_DETAIL},
+	{"gc", YAFFS_TRACE_GC},
+	{"lock", YAFFS_TRACE_LOCK},
+	{"mtd", YAFFS_TRACE_MTD},
+	{"nandaccess", YAFFS_TRACE_NANDACCESS},
+	{"os", YAFFS_TRACE_OS},
+	{"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+	{"scan", YAFFS_TRACE_SCAN},
+	{"mount", YAFFS_TRACE_MOUNT},
+	{"tracing", YAFFS_TRACE_TRACING},
+	{"sync", YAFFS_TRACE_SYNC},
+	{"write", YAFFS_TRACE_WRITE},
+	{"verify", YAFFS_TRACE_VERIFY},
+	{"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+	{"verify_full", YAFFS_TRACE_VERIFY_FULL},
+	{"verify_all", YAFFS_TRACE_VERIFY_ALL},
+	{"all", 0xffffffff},
+	{"none", 0},
+	{NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+					  unsigned long count, void *data)
+{
+	unsigned rg = 0, mask_bitfield;
+	char *end;
+	char *mask_name;
+	const char *x;
+	char substring[MAX_MASK_NAME_LENGTH + 1];
+	int i;
+	int done = 0;
+	int add, len = 0;
+	int pos = 0;
+
+	rg = yaffs_trace_mask;
+
+	while (!done && (pos < count)) {
+		done = 1;
+		while ((pos < count) && isspace(buf[pos]))
+			pos++;
+
+		switch (buf[pos]) {
+		case '+':
+		case '-':
+		case '=':
+			add = buf[pos];
+			pos++;
+			break;
+
+		default:
+			add = ' ';
+			break;
+		}
+		mask_name = NULL;
+
+		mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+		if (end > buf + pos) {
+			mask_name = "numeral";
+			len = end - (buf + pos);
+			pos += len;
+			done = 0;
+		} else {
+			for (x = buf + pos, i = 0;
+			     (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+			     i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+				substring[i] = *x;
+			substring[i] = '\0';
+
+			for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+				if (strcmp(substring, mask_flags[i].mask_name)
+				    == 0) {
+					mask_name = mask_flags[i].mask_name;
+					mask_bitfield =
+					    mask_flags[i].mask_bitfield;
+					done = 0;
+					break;
+				}
+			}
+		}
+
+		if (mask_name != NULL) {
+			done = 0;
+			switch (add) {
+			case '-':
+				rg &= ~mask_bitfield;
+				break;
+			case '+':
+				rg |= mask_bitfield;
+				break;
+			case '=':
+				rg = mask_bitfield;
+				break;
+			default:
+				rg |= mask_bitfield;
+				break;
+			}
+		}
+	}
+
+	yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+	printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+	if (rg & YAFFS_TRACE_ALWAYS) {
+		for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+			char flag;
+			flag = ((rg & mask_flags[i].mask_bitfield) ==
+				mask_flags[i].mask_bitfield) ? '+' : '-';
+			printk(KERN_DEBUG "%c%s\n", flag,
+			       mask_flags[i].mask_name);
+		}
+	}
+
+	return count;
+}
+
+static int yaffs_proc_write(struct file *file, const char *buf,
+			    unsigned long count, void *data)
+{
+	return yaffs_proc_write_trace_options(file, buf, count, data);
+}
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+	struct file_system_type *fst;
+	int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+	{&yaffs_fs_type, 0},
+	{&yaffs2_fs_type, 0},
+	{NULL, 0}
+};
+
+static int __init init_yaffs_fs(void)
+{
+	int error = 0;
+	struct file_system_to_install *fsinst;
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs built " __DATE__ " " __TIME__ " Installing.");
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"\n\nYAFFS-WARNING CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED selected.\n\n\n");
+#endif
+
+	mutex_init(&yaffs_context_lock);
+
+	/* Install the proc_fs entries */
+	my_proc_entry = create_proc_entry("yaffs",
+					  S_IRUGO | S_IFREG, YPROC_ROOT);
+
+	if (my_proc_entry) {
+		my_proc_entry->write_proc = yaffs_proc_write;
+		my_proc_entry->read_proc = yaffs_proc_read;
+		my_proc_entry->data = NULL;
+	} else {
+		return -ENOMEM;
+	}
+
+
+	/* Now add the file system entries */
+
+	fsinst = fs_to_install;
+
+	while (fsinst->fst && !error) {
+		error = register_filesystem(fsinst->fst);
+		if (!error)
+			fsinst->installed = 1;
+		fsinst++;
+	}
+
+	/* Any errors? uninstall  */
+	if (error) {
+		fsinst = fs_to_install;
+
+		while (fsinst->fst) {
+			if (fsinst->installed) {
+				unregister_filesystem(fsinst->fst);
+				fsinst->installed = 0;
+			}
+			fsinst++;
+		}
+	}
+
+	return error;
+}
+
+static void __exit exit_yaffs_fs(void)
+{
+
+	struct file_system_to_install *fsinst;
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs built " __DATE__ " " __TIME__ " removing.");
+
+	remove_proc_entry("yaffs", YPROC_ROOT);
+
+	fsinst = fs_to_install;
+
+	while (fsinst->fst) {
+		if (fsinst->installed) {
+			unregister_filesystem(fsinst->fst);
+			fsinst->installed = 0;
+		}
+		fsinst++;
+	}
+}
+
+module_init(init_yaffs_fs)
+module_exit(exit_yaffs_fs)
+
+MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2010");
+MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
new file mode 100644
index 0000000..9eb6030
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,433 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+	struct yaffs_ext_tags tags;
+	int blk;
+	int result;
+
+	int chunk;
+	int c;
+	int deleted;
+	enum yaffs_block_state state;
+	struct yaffs_obj *hard_list = NULL;
+	struct yaffs_block_info *bi;
+	u32 seq_number;
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_obj *in;
+	struct yaffs_obj *parent;
+
+	int alloc_failed = 0;
+
+	struct yaffs_shadow_fixer *shadow_fixers = NULL;
+
+	u8 *chunk_data;
+
+	yaffs_trace(YAFFS_TRACE_SCAN,
+		"yaffs1_scan starts  intstartblk %d intendblk %d...",
+		dev->internal_start_block, dev->internal_end_block);
+
+	chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+	dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+	/* Scan all the blocks to determine their state */
+	bi = dev->block_info;
+	for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+	     blk++) {
+		yaffs_clear_chunk_bits(dev, blk);
+		bi->pages_in_use = 0;
+		bi->soft_del_pages = 0;
+
+		yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+		bi->block_state = state;
+		bi->seq_number = seq_number;
+
+		if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+			bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+		yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+			"Block scanning block %d state %d seq %d",
+			blk, state, seq_number);
+
+		if (state == YAFFS_BLOCK_STATE_DEAD) {
+			yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+				"block %d is bad", blk);
+		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+			yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+			dev->n_erased_blocks++;
+			dev->n_free_chunks += dev->param.chunks_per_block;
+		}
+		bi++;
+	}
+
+	/* For each block.... */
+	for (blk = dev->internal_start_block;
+	     !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+		cond_resched();
+
+		bi = yaffs_get_block_info(dev, blk);
+		state = bi->block_state;
+
+		deleted = 0;
+
+		/* For each chunk in each block that needs scanning.... */
+		for (c = 0; !alloc_failed && c < dev->param.chunks_per_block &&
+		     state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
+			/* Read the tags and decide what to do */
+			chunk = blk * dev->param.chunks_per_block + c;
+
+			result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+							  &tags);
+
+			/* Let's have a good look at this chunk... */
+
+			if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED
+			    || tags.is_deleted) {
+				/* YAFFS1 only...
+				 * A deleted chunk
+				 */
+				deleted++;
+				dev->n_free_chunks++;
+				/*T((" %d %d deleted\n",blk,c)); */
+			} else if (!tags.chunk_used) {
+				/* An unassigned chunk in the block
+				 * This means that either the block is empty or
+				 * this is the one being allocated from
+				 */
+
+				if (c == 0) {
+					/* We're looking at the first chunk in the block so the block is unused */
+					state = YAFFS_BLOCK_STATE_EMPTY;
+					dev->n_erased_blocks++;
+				} else {
+					/* this is the block being allocated from */
+					yaffs_trace(YAFFS_TRACE_SCAN,
+						" Allocating from %d %d",
+						blk, c);
+					state = YAFFS_BLOCK_STATE_ALLOCATING;
+					dev->alloc_block = blk;
+					dev->alloc_page = c;
+					dev->alloc_block_finder = blk;
+					/* Set block finder here to encourage the allocator to go forth from here. */
+
+				}
+
+				dev->n_free_chunks +=
+				    (dev->param.chunks_per_block - c);
+			} else if (tags.chunk_id > 0) {
+				/* chunk_id > 0 so it is a data chunk... */
+				unsigned int endpos;
+
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				in = yaffs_find_or_create_by_number(dev,
+								    tags.obj_id,
+								    YAFFS_OBJECT_TYPE_FILE);
+				/* PutChunkIntoFile checks for a clash (two data chunks with
+				 * the same chunk_id).
+				 */
+
+				if (!in)
+					alloc_failed = 1;
+
+				if (in) {
+					if (!yaffs_put_chunk_in_file
+					    (in, tags.chunk_id, chunk, 1))
+						alloc_failed = 1;
+				}
+
+				endpos =
+				    (tags.chunk_id -
+				     1) * dev->data_bytes_per_chunk +
+				    tags.n_bytes;
+				if (in
+				    && in->variant_type ==
+				    YAFFS_OBJECT_TYPE_FILE
+				    && in->variant.file_variant.scanned_size <
+				    endpos) {
+					in->variant.file_variant.scanned_size =
+					    endpos;
+					if (!dev->param.use_header_file_size) {
+						in->variant.
+						    file_variant.file_size =
+						    in->variant.
+						    file_variant.scanned_size;
+					}
+
+				}
+				/* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id));   */
+			} else {
+				/* chunk_id == 0, so it is an ObjectHeader.
+				 * Thus, we read in the object header and make the object
+				 */
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				result = yaffs_rd_chunk_tags_nand(dev, chunk,
+								  chunk_data,
+								  NULL);
+
+				oh = (struct yaffs_obj_hdr *)chunk_data;
+
+				in = yaffs_find_by_number(dev, tags.obj_id);
+				if (in && in->variant_type != oh->type) {
+					/* This should not happen, but somehow
+				 * we've ended up with an obj_id that has been reused but not yet
+					 * deleted, and worse still it has changed type. Delete the old object.
+					 */
+
+					yaffs_del_obj(in);
+
+					in = 0;
+				}
+
+				in = yaffs_find_or_create_by_number(dev,
+								    tags.obj_id,
+								    oh->type);
+
+				if (!in)
+					alloc_failed = 1;
+
+				if (in && oh->shadows_obj > 0) {
+
+					struct yaffs_shadow_fixer *fixer;
+					fixer =
+						kmalloc(sizeof
+						(struct yaffs_shadow_fixer),
+						GFP_NOFS);
+					if (fixer) {
+						fixer->next = shadow_fixers;
+						shadow_fixers = fixer;
+						fixer->obj_id = tags.obj_id;
+						fixer->shadowed_id =
+						    oh->shadows_obj;
+						yaffs_trace(YAFFS_TRACE_SCAN,
+							" Shadow fixer: %d shadows %d",
+							fixer->obj_id,
+							fixer->shadowed_id);
+
+					}
+
+				}
+
+				if (in && in->valid) {
+					/* We have already filled this one. We have a duplicate and need to resolve it. */
+
+					unsigned existing_serial = in->serial;
+					unsigned new_serial =
+					    tags.serial_number;
+
+					if (((existing_serial + 1) & 3) ==
+					    new_serial) {
+						/* Use new one - destroy the existing one */
+						yaffs_chunk_del(dev,
+								in->hdr_chunk,
+								1, __LINE__);
+						in->valid = 0;
+					} else {
+						/* Use existing - destroy this one. */
+						yaffs_chunk_del(dev, chunk, 1,
+								__LINE__);
+					}
+				}
+
+				if (in && !in->valid &&
+				    (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+				     tags.obj_id ==
+				     YAFFS_OBJECTID_LOSTNFOUND)) {
+					/* We only load some info, don't fiddle with directory structure */
+					in->valid = 1;
+					in->variant_type = oh->type;
+
+					in->yst_mode = oh->yst_mode;
+					yaffs_load_attribs(in, oh);
+					in->hdr_chunk = chunk;
+					in->serial = tags.serial_number;
+
+				} else if (in && !in->valid) {
+					/* we need to load this info */
+
+					in->valid = 1;
+					in->variant_type = oh->type;
+
+					in->yst_mode = oh->yst_mode;
+					yaffs_load_attribs(in, oh);
+					in->hdr_chunk = chunk;
+					in->serial = tags.serial_number;
+
+					yaffs_set_obj_name_from_oh(in, oh);
+					in->dirty = 0;
+
+					/* directory stuff...
+					 * hook up to parent
+					 */
+
+					parent =
+					    yaffs_find_or_create_by_number
+					    (dev, oh->parent_obj_id,
+					     YAFFS_OBJECT_TYPE_DIRECTORY);
+					if (!parent)
+						alloc_failed = 1;
+					if (parent && parent->variant_type ==
+					    YAFFS_OBJECT_TYPE_UNKNOWN) {
+						/* Set up as a directory */
+						parent->variant_type =
+						    YAFFS_OBJECT_TYPE_DIRECTORY;
+						INIT_LIST_HEAD(&parent->
+							       variant.dir_variant.children);
+					} else if (!parent
+						   || parent->variant_type !=
+						   YAFFS_OBJECT_TYPE_DIRECTORY) {
+						/* Hoosterman, another problem....
+						 * We're trying to use a non-directory as a directory
+						 */
+
+						yaffs_trace(YAFFS_TRACE_ERROR,
+							"yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+							);
+						parent = dev->lost_n_found;
+					}
+
+					yaffs_add_obj_to_dir(parent, in);
+
+					if (0 && (parent == dev->del_dir ||
+						  parent ==
+						  dev->unlinked_dir)) {
+						in->deleted = 1;	/* If it is unlinked at start up then it wants deleting */
+						dev->n_deleted_files++;
+					}
+					/* Note re hardlinks.
+					 * Since we might scan a hardlink before its equivalent object is scanned
+					 * we put them all in a list.
+					 * After scanning is complete, we should have all the objects, so we run through this
+					 * list and fix up all the chains.
+					 */
+
+					switch (in->variant_type) {
+					case YAFFS_OBJECT_TYPE_UNKNOWN:
+						/* Todo got a problem */
+						break;
+					case YAFFS_OBJECT_TYPE_FILE:
+						if (dev->param.
+						    use_header_file_size)
+							in->variant.file_variant.
+							    file_size = oh->file_size;
+
+						break;
+					case YAFFS_OBJECT_TYPE_HARDLINK:
+						in->variant.
+						    hardlink_variant.equiv_id =
+						    oh->equiv_id;
+						in->hard_links.next =
+						    (struct list_head *)
+						    hard_list;
+						hard_list = in;
+						break;
+					case YAFFS_OBJECT_TYPE_DIRECTORY:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SPECIAL:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SYMLINK:
+						in->variant.symlink_variant.
+						    alias =
+						    yaffs_clone_str(oh->alias);
+						if (!in->variant.
+						    symlink_variant.alias)
+							alloc_failed = 1;
+						break;
+					}
+
+				}
+			}
+		}
+
+		if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+			/* If we got this far while scanning, then the block is fully allocated. */
+			state = YAFFS_BLOCK_STATE_FULL;
+		}
+
+		if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+			/* If the block was partially allocated then treat it as fully allocated. */
+			state = YAFFS_BLOCK_STATE_FULL;
+			dev->alloc_block = -1;
+		}
+
+		bi->block_state = state;
+
+		/* Now let's see if it was dirty */
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+			yaffs_block_became_dirty(dev, blk);
+		}
+
+	}
+
+	/* Ok, we've done all the scanning.
+	 * Fix up the hard link chains.
+	 * We should now have scanned all the objects, now it's time to add these
+	 * hardlinks.
+	 */
+
+	yaffs_link_fixup(dev, hard_list);
+
+	/* Fix up any shadowed objects */
+	{
+		struct yaffs_shadow_fixer *fixer;
+		struct yaffs_obj *obj;
+
+		while (shadow_fixers) {
+			fixer = shadow_fixers;
+			shadow_fixers = fixer->next;
+			/* Complete the rename transaction by deleting the shadowed object
+			 * then setting the object header to unshadowed.
+			 */
+			obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+			if (obj)
+				yaffs_del_obj(obj);
+
+			obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+			if (obj)
+				yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+			kfree(fixer);
+		}
+	}
+
+	yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+	if (alloc_failed)
+		return YAFFS_FAIL;
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+	return YAFFS_OK;
+}
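
For illustration, the duplicate object header handling above hinges on YAFFS1's
2-bit serial number, which wraps modulo 4. A minimal sketch of that test (the
helper name is hypothetical, not part of the patch):

/* Non-zero if new_serial supersedes existing_serial. Serial numbers are only
 * 2 bits wide, so "existing + 1 == new" modulo 4 means the header just
 * scanned is the newer copy and the previously loaded one should be dropped.
 */
static int yaffs1_serial_is_newer(unsigned existing_serial,
				  unsigned new_serial)
{
	return ((existing_serial + 1) & 3) == new_serial;
}
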
diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
new file mode 100644
index 0000000..db23e04
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
new file mode 100644
index 0000000..33397af
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1598 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+
+/*
+ * Checkpoints are really of no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+	int i;
+	unsigned seq;
+	unsigned block_no = 0;
+	struct yaffs_block_info *b;
+
+	if (!dev->param.is_yaffs2)
+		return;
+
+	/* Find the oldest dirty sequence number. */
+	seq = dev->seq_number + 1;
+	b = dev->block_info;
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+		    (b->pages_in_use - b->soft_del_pages) <
+		    dev->param.chunks_per_block && b->seq_number < seq) {
+			seq = b->seq_number;
+			block_no = i;
+		}
+		b++;
+	}
+
+	if (block_no) {
+		dev->oldest_dirty_seq = seq;
+		dev->oldest_dirty_block = block_no;
+	}
+
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+	if (!dev->param.is_yaffs2)
+		return;
+
+	if (!dev->oldest_dirty_seq)
+		yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs2_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad (i.e. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+				   struct yaffs_block_info *bi)
+{
+
+	if (!dev->param.is_yaffs2)
+		return;
+
+	if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+		dev->oldest_dirty_seq = 0;
+		dev->oldest_dirty_block = 0;
+	}
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+				    struct yaffs_block_info *bi)
+{
+	if (!dev->param.is_yaffs2)
+		return;
+
+	if (dev->oldest_dirty_seq) {
+		if (dev->oldest_dirty_seq > bi->seq_number) {
+			dev->oldest_dirty_seq = bi->seq_number;
+			dev->oldest_dirty_block = block_no;
+		}
+	}
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+	if (!dev->param.is_yaffs2)
+		return 1;	/* disqualification only applies to yaffs2. */
+
+	if (!bi->has_shrink_hdr)
+		return 1;	/* can gc */
+
+	yaffs2_find_oldest_dirty_seq(dev);
+
+	/* Can't do gc of this block if there are any blocks older than this one that have
+	 * discarded pages.
+	 */
+	return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev * dev)
+{
+	u32 b;
+
+	u32 oldest = 0;
+	u32 oldest_seq = 0;
+
+	struct yaffs_block_info *bi;
+
+	if (!dev->param.is_yaffs2)
+		return oldest;
+
+	/*
+	 * If refresh period < 10 then refreshing is disabled.
+	 */
+	if (dev->param.refresh_period < 10)
+		return oldest;
+
+	/*
+	 * Fix broken values.
+	 */
+	if (dev->refresh_skip > dev->param.refresh_period)
+		dev->refresh_skip = dev->param.refresh_period;
+
+	if (dev->refresh_skip > 0)
+		return oldest;
+
+	/*
+	 * Refresh skip is now zero.
+	 * We'll do a refresh this time around....
+	 * Update the refresh skip and find the oldest block.
+	 */
+	dev->refresh_skip = dev->param.refresh_period;
+	dev->refresh_count++;
+	bi = dev->block_info;
+	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+		if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+			if (oldest < 1 || bi->seq_number < oldest_seq) {
+				oldest = b;
+				oldest_seq = bi->seq_number;
+			}
+		}
+		bi++;
+	}
+
+	if (oldest > 0) {
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC refresh count %d selected block %d with seq_number %d",
+			dev->refresh_count, oldest, oldest_seq);
+	}
+
+	return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+	int nblocks;
+
+	if (!dev->param.is_yaffs2)
+		return 0;
+
+	nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+	return !dev->param.skip_checkpt_wr &&
+	    !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+	int retval;
+
+	if (!dev->param.is_yaffs2)
+		return 0;
+
+	if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+		/* Not a valid value so recalculate */
+		int n_bytes = 0;
+		int n_blocks;
+		int dev_blocks =
+		    (dev->param.end_block - dev->param.start_block + 1);
+
+		n_bytes += sizeof(struct yaffs_checkpt_validity);
+		n_bytes += sizeof(struct yaffs_checkpt_dev);
+		n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+		n_bytes += dev_blocks * dev->chunk_bit_stride;
+		n_bytes +=
+		    (sizeof(struct yaffs_checkpt_obj) +
+		     sizeof(u32)) * (dev->n_obj);
+		n_bytes += (dev->tnode_size + sizeof(u32)) * (dev->n_tnodes);
+		n_bytes += sizeof(struct yaffs_checkpt_validity);
+		n_bytes += sizeof(u32);	/* checksum */
+
+		/* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
+
+		n_blocks =
+		    (n_bytes /
+		     (dev->data_bytes_per_chunk *
+		      dev->param.chunks_per_block)) + 3;
+
+		dev->checkpoint_blocks_required = n_blocks;
+	}
+
+	retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+	if (retval < 0)
+		retval = 0;
+	return retval;
+}
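
For reference, the rounding used above reduces to the following sketch (the
helper name is hypothetical):

static int checkpt_bytes_to_blocks(int n_bytes, int data_bytes_per_chunk,
				   int chunks_per_block)
{
	/* One erase block holds data_bytes_per_chunk * chunks_per_block bytes
	 * of checkpoint stream; the +3 rounds up and leaves roughly two spare
	 * blocks of bad-block headroom, as noted in the comment above.
	 */
	return n_bytes / (data_bytes_per_chunk * chunks_per_block) + 3;
}
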
+
+/*--------------------- Checkpointing --------------------*/
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+	struct yaffs_checkpt_validity cp;
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.struct_type = sizeof(cp);
+	cp.magic = YAFFS_MAGIC;
+	cp.version = YAFFS_CHECKPOINT_VERSION;
+	cp.head = (head) ? 1 : 0;
+
+	return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+	struct yaffs_checkpt_validity cp;
+	int ok;
+
+	ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+	if (ok)
+		ok = (cp.struct_type == sizeof(cp)) &&
+		    (cp.magic == YAFFS_MAGIC) &&
+		    (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+		    (cp.head == ((head) ? 1 : 0));
+	return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+				      struct yaffs_dev *dev)
+{
+	cp->n_erased_blocks = dev->n_erased_blocks;
+	cp->alloc_block = dev->alloc_block;
+	cp->alloc_page = dev->alloc_page;
+	cp->n_free_chunks = dev->n_free_chunks;
+
+	cp->n_deleted_files = dev->n_deleted_files;
+	cp->n_unlinked_files = dev->n_unlinked_files;
+	cp->n_bg_deletions = dev->n_bg_deletions;
+	cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+				     struct yaffs_checkpt_dev *cp)
+{
+	dev->n_erased_blocks = cp->n_erased_blocks;
+	dev->alloc_block = cp->alloc_block;
+	dev->alloc_page = cp->alloc_page;
+	dev->n_free_chunks = cp->n_free_chunks;
+
+	dev->n_deleted_files = cp->n_deleted_files;
+	dev->n_unlinked_files = cp->n_unlinked_files;
+	dev->n_bg_deletions = cp->n_bg_deletions;
+	dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+	struct yaffs_checkpt_dev cp;
+	u32 n_bytes;
+	u32 n_blocks =
+	    (dev->internal_end_block - dev->internal_start_block + 1);
+
+	int ok;
+
+	/* Write device runtime values */
+	yaffs2_dev_to_checkpt_dev(&cp, dev);
+	cp.struct_type = sizeof(cp);
+
+	ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+	/* Write block info */
+	if (ok) {
+		n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+		ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) ==
+		      n_bytes);
+	}
+
+	/* Write chunk bits */
+	if (ok) {
+		n_bytes = n_blocks * dev->chunk_bit_stride;
+		ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) ==
+		      n_bytes);
+	}
+	return ok ? 1 : 0;
+
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+	struct yaffs_checkpt_dev cp;
+	u32 n_bytes;
+	u32 n_blocks =
+	    (dev->internal_end_block - dev->internal_start_block + 1);
+
+	int ok;
+
+	ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+	if (!ok)
+		return 0;
+
+	if (cp.struct_type != sizeof(cp))
+		return 0;
+
+	yaffs_checkpt_dev_to_dev(dev, &cp);
+
+	n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+	ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+	if (!ok)
+		return 0;
+	n_bytes = n_blocks * dev->chunk_bit_stride;
+
+	ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+	return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+				   struct yaffs_obj *obj)
+{
+
+	cp->obj_id = obj->obj_id;
+	cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+	cp->hdr_chunk = obj->hdr_chunk;
+	cp->variant_type = obj->variant_type;
+	cp->deleted = obj->deleted;
+	cp->soft_del = obj->soft_del;
+	cp->unlinked = obj->unlinked;
+	cp->fake = obj->fake;
+	cp->rename_allowed = obj->rename_allowed;
+	cp->unlink_allowed = obj->unlink_allowed;
+	cp->serial = obj->serial;
+	cp->n_data_chunks = obj->n_data_chunks;
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+		cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+	else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+		cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+				     struct yaffs_checkpt_obj *cp)
+{
+
+	struct yaffs_obj *parent;
+
+	if (obj->variant_type != cp->variant_type) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+			cp->obj_id, cp->variant_type, cp->hdr_chunk,
+			obj->variant_type);
+		return 0;
+	}
+
+	obj->obj_id = cp->obj_id;
+
+	if (cp->parent_id)
+		parent = yaffs_find_or_create_by_number(obj->my_dev,
+							cp->parent_id,
+							YAFFS_OBJECT_TYPE_DIRECTORY);
+	else
+		parent = NULL;
+
+	if (parent) {
+		if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+				cp->obj_id, cp->parent_id,
+				cp->variant_type, cp->hdr_chunk,
+				parent->variant_type);
+			return 0;
+		}
+		yaffs_add_obj_to_dir(parent, obj);
+	}
+
+	obj->hdr_chunk = cp->hdr_chunk;
+	obj->variant_type = cp->variant_type;
+	obj->deleted = cp->deleted;
+	obj->soft_del = cp->soft_del;
+	obj->unlinked = cp->unlinked;
+	obj->fake = cp->fake;
+	obj->rename_allowed = cp->rename_allowed;
+	obj->unlink_allowed = cp->unlink_allowed;
+	obj->serial = cp->serial;
+	obj->n_data_chunks = cp->n_data_chunks;
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+		obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+	else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+		obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+	if (obj->hdr_chunk > 0)
+		obj->lazy_loaded = 1;
+	return 1;
+}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+				       struct yaffs_tnode *tn, u32 level,
+				       int chunk_offset)
+{
+	int i;
+	struct yaffs_dev *dev = in->my_dev;
+	int ok = 1;
+
+	if (tn) {
+		if (level > 0) {
+
+			for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+				if (tn->internal[i]) {
+					ok = yaffs2_checkpt_tnode_worker(in,
+						tn->internal[i],
+						level - 1,
+						(chunk_offset <<
+						 YAFFS_TNODES_INTERNAL_BITS) + i);
+				}
+			}
+		} else if (level == 0) {
+			u32 base_offset =
+			    chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+			ok = (yaffs2_checkpt_wr
+			      (dev, &base_offset,
+			       sizeof(base_offset)) == sizeof(base_offset));
+			if (ok)
+				ok = (yaffs2_checkpt_wr
+				      (dev, tn,
+				       dev->tnode_size) == dev->tnode_size);
+		}
+	}
+
+	return ok;
+
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+	u32 end_marker = ~0;
+	int ok = 1;
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+		ok = yaffs2_checkpt_tnode_worker(obj,
+						 obj->variant.file_variant.top,
+						 obj->variant.file_variant.
+						 top_level, 0);
+		if (ok)
+			ok = (yaffs2_checkpt_wr
+			      (obj->my_dev, &end_marker,
+			       sizeof(end_marker)) == sizeof(end_marker));
+	}
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+	u32 base_chunk;
+	int ok = 1;
+	struct yaffs_dev *dev = obj->my_dev;
+	struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
+	struct yaffs_tnode *tn;
+	int nread = 0;
+
+	ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+	      sizeof(base_chunk));
+
+	while (ok && (~base_chunk)) {
+		nread++;
+		/* Read level 0 tnode */
+
+		tn = yaffs_get_tnode(dev);
+		if (tn) {
+			ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+			      dev->tnode_size);
+		} else {
+			ok = 0;
+		}
+
+		if (tn && ok)
+			ok = yaffs_add_find_tnode_0(dev,
+						    file_struct_ptr,
+						    base_chunk, tn) ? 1 : 0;
+
+		if (ok)
+			ok = (yaffs2_checkpt_rd
+			      (dev, &base_chunk,
+			       sizeof(base_chunk)) == sizeof(base_chunk));
+
+	}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"Checkpoint read tnodes %d records, last %d. ok %d",
+		nread, base_chunk, ok);
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_checkpt_obj cp;
+	int i;
+	int ok = 1;
+	struct list_head *lh;
+
+	/* Iterate through the objects in each hash entry,
+	 * dumping them to the checkpointing stream.
+	 */
+
+	for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+		list_for_each(lh, &dev->obj_bucket[i].list) {
+			if (lh) {
+				obj =
+				    list_entry(lh, struct yaffs_obj, hash_link);
+				if (!obj->defered_free) {
+					yaffs2_obj_checkpt_obj(&cp, obj);
+					cp.struct_type = sizeof(cp);
+
+					yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+						"Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+						cp.obj_id, cp.parent_id,
+						cp.variant_type, cp.hdr_chunk, obj);
+
+					ok = (yaffs2_checkpt_wr
+					      (dev, &cp,
+					       sizeof(cp)) == sizeof(cp));
+
+					if (ok
+					    && obj->variant_type ==
+					    YAFFS_OBJECT_TYPE_FILE)
+						ok = yaffs2_wr_checkpt_tnodes
+						    (obj);
+				}
+			}
+		}
+	}
+
+	/* Dump end of list */
+	memset(&cp, 0xFF, sizeof(struct yaffs_checkpt_obj));
+	cp.struct_type = sizeof(cp);
+
+	if (ok)
+		ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_checkpt_obj cp;
+	int ok = 1;
+	int done = 0;
+	struct yaffs_obj *hard_list = NULL;
+
+	while (ok && !done) {
+		ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+		if (cp.struct_type != sizeof(cp)) {
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+				"struct size %d instead of %d ok %d",
+				cp.struct_type, (int)sizeof(cp), ok);
+			ok = 0;
+		}
+
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"Checkpoint read object %d parent %d type %d chunk %d ",
+			cp.obj_id, cp.parent_id, cp.variant_type,
+			cp.hdr_chunk);
+
+		if (ok && cp.obj_id == ~0) {
+			done = 1;
+		} else if (ok) {
+			obj =
+			    yaffs_find_or_create_by_number(dev, cp.obj_id,
+							   cp.variant_type);
+			if (obj) {
+				ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+				if (!ok)
+					break;
+				if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+					ok = yaffs2_rd_checkpt_tnodes(obj);
+				} else if (obj->variant_type ==
+					   YAFFS_OBJECT_TYPE_HARDLINK) {
+					obj->hard_links.next =
+					    (struct list_head *)hard_list;
+					hard_list = obj;
+				}
+			} else {
+				ok = 0;
+			}
+		}
+	}
+
+	if (ok)
+		yaffs_link_fixup(dev, hard_list);
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+	u32 checkpt_sum;
+	int ok;
+
+	yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+	ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+	      sizeof(checkpt_sum));
+
+	if (!ok)
+		return 0;
+
+	return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+	u32 checkpt_sum0;
+	u32 checkpt_sum1;
+	int ok;
+
+	yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+	ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+	      sizeof(checkpt_sum1));
+
+	if (!ok)
+		return 0;
+
+	if (checkpt_sum0 != checkpt_sum1)
+		return 0;
+
+	return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+	int ok = 1;
+
+	if (!yaffs2_checkpt_required(dev)) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"skipping checkpoint write");
+		ok = 0;
+	}
+
+	if (ok)
+		ok = yaffs2_checkpt_open(dev, 1);
+
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint validity");
+		ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint device");
+		ok = yaffs2_wr_checkpt_dev(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint objects");
+		ok = yaffs2_wr_checkpt_objs(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint validity");
+		ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+	}
+
+	if (ok)
+		ok = yaffs2_wr_checkpt_sum(dev);
+
+	if (!yaffs_checkpt_close(dev))
+		ok = 0;
+
+	if (ok)
+		dev->is_checkpointed = 1;
+	else
+		dev->is_checkpointed = 0;
+
+	return dev->is_checkpointed;
+}
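
For reference, the write path above fixes the on-NAND checkpoint stream layout,
summarised here as a comment (derived from yaffs2_wr_checkpt_data() and its
helpers):

/*
 * Checkpoint stream layout, in write order:
 *  1. validity marker with head = 1
 *  2. struct yaffs_checkpt_dev, then the block_info array, then the
 *     chunk-bit map
 *  3. one struct yaffs_checkpt_obj per live object; file objects are followed
 *     by their level-0 tnodes, each preceded by a u32 chunk offset and
 *     terminated by an all-ones (~0) offset
 *  4. an all-0xFF object record marking the end of the object list
 *  5. validity marker with head = 0
 *  6. a u32 checksum
 * yaffs2_rd_checkpt_data() below reads the stream back in the same order.
 */
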
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+	int ok = 1;
+
+	if (!dev->param.is_yaffs2)
+		ok = 0;
+
+	if (ok && dev->param.skip_checkpt_rd) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"skipping checkpoint read");
+		ok = 0;
+	}
+
+	if (ok)
+		ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint validity");
+		ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint device");
+		ok = yaffs2_rd_checkpt_dev(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint objects");
+		ok = yaffs2_rd_checkpt_objs(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint validity");
+		ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+	}
+
+	if (ok) {
+		ok = yaffs2_rd_checkpt_sum(dev);
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint checksum %d", ok);
+	}
+
+	if (!yaffs_checkpt_close(dev))
+		ok = 0;
+
+	if (ok)
+		dev->is_checkpointed = 1;
+	else
+		dev->is_checkpointed = 0;
+
+	return ok ? 1 : 0;
+
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+	if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+		dev->is_checkpointed = 0;
+		yaffs2_checkpt_invalidate_stream(dev);
+	}
+	if (dev->param.sb_dirty_fn)
+		dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev)
+{
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"save entry: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	yaffs_verify_objects(dev);
+	yaffs_verify_blocks(dev);
+	yaffs_verify_free_chunks(dev);
+
+	if (!dev->is_checkpointed) {
+		yaffs2_checkpt_invalidate(dev);
+		yaffs2_wr_checkpt_data(dev);
+	}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+		"save exit: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+	int retval;
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"restore entry: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	retval = yaffs2_rd_checkpt_data(dev);
+
+	if (dev->is_checkpointed) {
+		yaffs_verify_objects(dev);
+		yaffs_verify_blocks(dev);
+		yaffs_verify_free_chunks(dev);
+	}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"restore exit: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+	/* If new_size > old_file_size then we're going to be writing a hole.
+	 * If the hole is small then write zeros, otherwise write a
+	 * start-of-hole marker.
+	 */
+
+	loff_t old_file_size;
+	int increase;
+	int small_hole;
+	int result = YAFFS_OK;
+	struct yaffs_dev *dev = NULL;
+
+	u8 *local_buffer = NULL;
+
+	int small_increase_ok = 0;
+
+	if (!obj)
+		return YAFFS_FAIL;
+
+	if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+		return YAFFS_FAIL;
+
+	dev = obj->my_dev;
+
+	/* Bail out if not yaffs2 mode */
+	if (!dev->param.is_yaffs2)
+		return YAFFS_OK;
+
+	old_file_size = obj->variant.file_variant.file_size;
+
+	if (new_size <= old_file_size)
+		return YAFFS_OK;
+
+	increase = new_size - old_file_size;
+
+	if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+	    yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+		small_hole = 1;
+	else
+		small_hole = 0;
+
+	if (small_hole)
+		local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+	if (local_buffer) {
+		/* fill hole with zero bytes */
+		int pos = old_file_size;
+		int this_write;
+		int written;
+		memset(local_buffer, 0, dev->data_bytes_per_chunk);
+		small_increase_ok = 1;
+
+		while (increase > 0 && small_increase_ok) {
+			this_write = increase;
+			if (this_write > dev->data_bytes_per_chunk)
+				this_write = dev->data_bytes_per_chunk;
+			written =
+			    yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+					     0);
+			if (written == this_write) {
+				pos += this_write;
+				increase -= this_write;
+			} else {
+				small_increase_ok = 0;
+			}
+		}
+
+		yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+
+		/* If we were out of space then reverse any chunks we've added */
+		if (!small_increase_ok)
+			yaffs_resize_file_down(obj, old_file_size);
+	}
+
+	if (!small_increase_ok &&
+	    obj->parent &&
+	    obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+	    obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+		/* Write a hole start header with the old file size */
+		yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+	}
+
+	return result;
+
+}
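
A worked example of the small-hole threshold above, assuming 2048-byte data
chunks (the chunk size is an assumption for illustration only):

/* With data_bytes_per_chunk == 2048 and YAFFS_SMALL_HOLE_THRESHOLD == 4,
 * extending a file by less than 4 * 2048 = 8192 bytes writes real zero-filled
 * chunks, provided at least 5 chunks are free; a larger extension only
 * records a hole-start object header carrying the old file size.
 */
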
+
+struct yaffs_block_index {
+	int seq;
+	int block;
+};
+
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+	int aseq = ((struct yaffs_block_index *)a)->seq;
+	int bseq = ((struct yaffs_block_index *)b)->seq;
+	int ablock = ((struct yaffs_block_index *)a)->block;
+	int bblock = ((struct yaffs_block_index *)b)->block;
+	if (aseq == bseq)
+		return ablock - bblock;
+	else
+		return aseq - bseq;
+}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+	struct yaffs_ext_tags tags;
+	int blk;
+	int block_iter;
+	int start_iter;
+	int end_iter;
+	int n_to_scan = 0;
+
+	int chunk;
+	int result;
+	int c;
+	int deleted;
+	enum yaffs_block_state state;
+	struct yaffs_obj *hard_list = NULL;
+	struct yaffs_block_info *bi;
+	u32 seq_number;
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_obj *in;
+	struct yaffs_obj *parent;
+	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+	int is_unlinked;
+	u8 *chunk_data;
+
+	int file_size;
+	int is_shrink;
+	int found_chunks;
+	int equiv_id;
+	int alloc_failed = 0;
+
+	struct yaffs_block_index *block_index = NULL;
+	int alt_block_index = 0;
+
+	yaffs_trace(YAFFS_TRACE_SCAN,
+		"yaffs2_scan_backwards starts  intstartblk %d intendblk %d...",
+		dev->internal_start_block, dev->internal_end_block);
+
+	dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+	block_index = kmalloc(n_blocks * sizeof(struct yaffs_block_index),
+			GFP_NOFS);
+
+	if (!block_index) {
+		block_index =
+		    vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+		alt_block_index = 1;
+	}
+
+	if (!block_index) {
+		yaffs_trace(YAFFS_TRACE_SCAN,
+			"yaffs2_scan_backwards() could not allocate block index!"
+			);
+		return YAFFS_FAIL;
+	}
+
+	dev->blocks_in_checkpt = 0;
+
+	chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+	/* Scan all the blocks to determine their state */
+	bi = dev->block_info;
+	for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+	     blk++) {
+		yaffs_clear_chunk_bits(dev, blk);
+		bi->pages_in_use = 0;
+		bi->soft_del_pages = 0;
+
+		yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+		bi->block_state = state;
+		bi->seq_number = seq_number;
+
+		if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+			bi->block_state = state = YAFFS_BLOCK_STATE_CHECKPOINT;
+		if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+			bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+		yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+			"Block scanning block %d state %d seq %d",
+			blk, state, seq_number);
+
+		if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+			dev->blocks_in_checkpt++;
+
+		} else if (state == YAFFS_BLOCK_STATE_DEAD) {
+			yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+				"block %d is bad", blk);
+		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+			yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+			dev->n_erased_blocks++;
+			dev->n_free_chunks += dev->param.chunks_per_block;
+		} else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+			/* Determine the highest sequence number */
+			if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+			    seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+				block_index[n_to_scan].seq = seq_number;
+				block_index[n_to_scan].block = blk;
+
+				n_to_scan++;
+
+				if (seq_number >= dev->seq_number)
+					dev->seq_number = seq_number;
+			} else {
+				/* TODO: Nasty sequence number! */
+				yaffs_trace(YAFFS_TRACE_SCAN,
+					"Block scanning block %d has bad sequence number %d",
+					blk, seq_number);
+
+			}
+		}
+		bi++;
+	}
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+	cond_resched();
+
+	/* Sort the blocks by sequence number */
+	sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+		   yaffs2_ybicmp, NULL);
+
+	cond_resched();
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+	/* Now scan the blocks looking at the data. */
+	start_iter = 0;
+	end_iter = n_to_scan - 1;
+	yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+	/* For each block.... backwards */
+	for (block_iter = end_iter; !alloc_failed && block_iter >= start_iter;
+	     block_iter--) {
+		/* Cooperative multitasking! This loop can run for so
+		   long that watchdog timers expire. */
+		cond_resched();
+
+		/* get the block to scan in the correct order */
+		blk = block_index[block_iter].block;
+
+		bi = yaffs_get_block_info(dev, blk);
+
+		state = bi->block_state;
+
+		deleted = 0;
+
+		/* For each chunk in each block that needs scanning.... */
+		found_chunks = 0;
+		for (c = dev->param.chunks_per_block - 1;
+		     !alloc_failed && c >= 0 &&
+		     (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+		      state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
+			/* Scan backwards...
+			 * Read the tags and decide what to do
+			 */
+
+			chunk = blk * dev->param.chunks_per_block + c;
+
+			result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+							  &tags);
+
+			/* Let's have a good look at this chunk... */
+
+			if (!tags.chunk_used) {
+				/* An unassigned chunk in the block.
+				 * If there are used chunks after this one, then
+				 * it is a chunk that was skipped due to failing the erased
+				 * check. Just skip it so that it can be deleted.
+				 * But, more typically, we get here when this is an unallocated
+				 * chunk and this means that either the block is empty or
+				 * this is the one being allocated from
+				 */
+
+				if (found_chunks) {
+					/* This is a chunk that was skipped due to failing the erased check */
+				} else if (c == 0) {
+					/* We're looking at the first chunk in the block so the block is unused */
+					state = YAFFS_BLOCK_STATE_EMPTY;
+					dev->n_erased_blocks++;
+				} else {
+					if (state ==
+					    YAFFS_BLOCK_STATE_NEEDS_SCANNING
+					    || state ==
+					    YAFFS_BLOCK_STATE_ALLOCATING) {
+						if (dev->seq_number ==
+						    bi->seq_number) {
+							/* this is the block being allocated from */
+
+							yaffs_trace(YAFFS_TRACE_SCAN,
+								" Allocating from %d %d",
+								blk, c);
+
+							state =
+							    YAFFS_BLOCK_STATE_ALLOCATING;
+							dev->alloc_block = blk;
+							dev->alloc_page = c;
+							dev->
+							    alloc_block_finder =
+							    blk;
+						} else {
+							/* This is a partially written block that is not
+							 * the current allocation block.
+							 */
+
+							yaffs_trace(YAFFS_TRACE_SCAN,
+								"Partially written block %d detected",
+								blk);
+						}
+					}
+				}
+
+				dev->n_free_chunks++;
+
+			} else if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+				yaffs_trace(YAFFS_TRACE_SCAN,
+					" Unfixed ECC in chunk(%d:%d), chunk ignored",
+					blk, c);
+
+				dev->n_free_chunks++;
+
+			} else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+				   tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+				   (tags.chunk_id > 0
+				    && tags.n_bytes > dev->data_bytes_per_chunk)
+				   || tags.seq_number != bi->seq_number) {
+				yaffs_trace(YAFFS_TRACE_SCAN,
+					"Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+					blk, c, tags.obj_id,
+					tags.chunk_id, tags.n_bytes);
+
+				dev->n_free_chunks++;
+
+			} else if (tags.chunk_id > 0) {
+				/* chunk_id > 0 so it is a data chunk... */
+				unsigned int endpos;
+				u32 chunk_base =
+				    (tags.chunk_id -
+				     1) * dev->data_bytes_per_chunk;
+
+				found_chunks = 1;
+
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				in = yaffs_find_or_create_by_number(dev,
+								    tags.obj_id,
+								    YAFFS_OBJECT_TYPE_FILE);
+				if (!in) {
+					/* Out of memory */
+					alloc_failed = 1;
+				}
+
+				if (in &&
+				    in->variant_type == YAFFS_OBJECT_TYPE_FILE
+				    && chunk_base <
+				    in->variant.file_variant.shrink_size) {
+					/* This has not been invalidated by a resize */
+					if (!yaffs_put_chunk_in_file
+					    (in, tags.chunk_id, chunk, -1)) {
+						alloc_failed = 1;
+					}
+
+					/* File size is calculated by looking at the data chunks if we have not
+					 * seen an object header yet. Stop this practice once we find an object header.
+					 */
+					endpos = chunk_base + tags.n_bytes;
+
+					if (!in->valid &&	/* have not got an object header yet */
+					    in->variant.file_variant.
+					    scanned_size < endpos) {
+						in->variant.file_variant.
+						    scanned_size = endpos;
+						in->variant.file_variant.
+						    file_size = endpos;
+					}
+
+				} else if (in) {
+					/* This chunk has been invalidated by a resize, or a past file deletion
+					 * so delete the chunk*/
+					yaffs_chunk_del(dev, chunk, 1,
+							__LINE__);
+
+				}
+			} else {
+				/* chunk_id == 0, so it is an ObjectHeader.
+				 * Thus, we read in the object header and make the object
+				 */
+				found_chunks = 1;
+
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				oh = NULL;
+				in = NULL;
+
+				if (tags.extra_available) {
+					in = yaffs_find_or_create_by_number(dev,
+							tags.obj_id,
+							tags.extra_obj_type);
+					if (!in)
+						alloc_failed = 1;
+				}
+
+				if (!in ||
+				    (!in->valid && dev->param.disable_lazy_load) ||
+				    tags.extra_shadows ||
+				    (!in->valid &&
+				     (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+				      tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+					/* If we don't have valid info then we need to read the chunk.
+					 * TODO: In future we can probably defer reading the chunk and
+					 * live with invalid data until needed.
+					 */
+
+					result = yaffs_rd_chunk_tags_nand(dev,
+									  chunk,
+									  chunk_data,
+									  NULL);
+
+					oh = (struct yaffs_obj_hdr *)chunk_data;
+
+					if (dev->param.inband_tags) {
+						/* Fix up the header if they got corrupted by inband tags */
+						oh->shadows_obj =
+						    oh->inband_shadowed_obj_id;
+						oh->is_shrink =
+						    oh->inband_is_shrink;
+					}
+
+					if (!in) {
+						in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type);
+						if (!in)
+							alloc_failed = 1;
+					}
+
+				}
+
+				if (!in) {
+					/* TODO Hoosterman we have a problem! */
+					yaffs_trace(YAFFS_TRACE_ERROR,
+						"yaffs tragedy: Could not make object for object  %d at chunk %d during scan",
+						tags.obj_id, chunk);
+					continue;
+				}
+
+				if (in->valid) {
+					/* We have already filled this one.
+					 * We have a duplicate that will be discarded, but
+					 * we first have to suck out resize info if it is a file.
+					 */
+
+					if (in->variant_type ==
+					    YAFFS_OBJECT_TYPE_FILE &&
+					    ((oh && oh->type ==
+					      YAFFS_OBJECT_TYPE_FILE) ||
+					     (tags.extra_available &&
+					      tags.extra_obj_type ==
+					      YAFFS_OBJECT_TYPE_FILE))) {
+						u32 this_size = oh ?
+						    oh->file_size :
+						    tags.extra_length;
+						u32 parent_obj_id = oh ?
+						    oh->parent_obj_id :
+						    tags.extra_parent_id;
+
+						is_shrink = oh ?
+						    oh->is_shrink :
+						    tags.extra_is_shrink;
+
+						/* If it is deleted (unlinked at start also means deleted)
+						 * we treat the file size as being zeroed at this point.
+						 */
+						if (parent_obj_id ==
+						    YAFFS_OBJECTID_DELETED
+						    || parent_obj_id ==
+						    YAFFS_OBJECTID_UNLINKED) {
+							this_size = 0;
+							is_shrink = 1;
+						}
+
+						if (is_shrink
+						    && in->variant.file_variant.
+						    shrink_size > this_size)
+							in->variant.
+							    file_variant.
+							    shrink_size =
+							    this_size;
+
+						if (is_shrink)
+							bi->has_shrink_hdr = 1;
+
+					}
+					/* Use existing - destroy this one. */
+					yaffs_chunk_del(dev, chunk, 1,
+							__LINE__);
+
+				}
+
+				if (!in->valid && in->variant_type !=
+				    (oh ? oh->type : tags.extra_obj_type))
+					yaffs_trace(YAFFS_TRACE_ERROR,
+						"yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+						oh ?
+						oh->type : tags.extra_obj_type,
+						in->variant_type, tags.obj_id,
+						chunk);
+
+				if (!in->valid &&
+				    (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+				     tags.obj_id ==
+				     YAFFS_OBJECTID_LOSTNFOUND)) {
+					/* We only load some info, don't fiddle with directory structure */
+					in->valid = 1;
+
+					if (oh) {
+
+						in->yst_mode = oh->yst_mode;
+						yaffs_load_attribs(in, oh);
+						in->lazy_loaded = 0;
+					} else {
+						in->lazy_loaded = 1;
+					}
+					in->hdr_chunk = chunk;
+
+				} else if (!in->valid) {
+					/* we need to load this info */
+
+					in->valid = 1;
+					in->hdr_chunk = chunk;
+
+					if (oh) {
+						in->variant_type = oh->type;
+
+						in->yst_mode = oh->yst_mode;
+						yaffs_load_attribs(in, oh);
+
+						if (oh->shadows_obj > 0)
+							yaffs_handle_shadowed_obj
+							    (dev,
+							     oh->shadows_obj,
+							     1);
+
+						yaffs_set_obj_name_from_oh(in,
+									   oh);
+						parent =
+						    yaffs_find_or_create_by_number
+						    (dev, oh->parent_obj_id,
+						     YAFFS_OBJECT_TYPE_DIRECTORY);
+
+						file_size = oh->file_size;
+						is_shrink = oh->is_shrink;
+						equiv_id = oh->equiv_id;
+
+					} else {
+						in->variant_type =
+						    tags.extra_obj_type;
+						parent =
+						    yaffs_find_or_create_by_number
+						    (dev, tags.extra_parent_id,
+						     YAFFS_OBJECT_TYPE_DIRECTORY);
+						file_size = tags.extra_length;
+						is_shrink =
+						    tags.extra_is_shrink;
+						equiv_id = tags.extra_equiv_id;
+						in->lazy_loaded = 1;
+
+					}
+					in->dirty = 0;
+
+					if (!parent)
+						alloc_failed = 1;
+
+					/* directory stuff...
+					 * hook up to parent
+					 */
+
+					if (parent && parent->variant_type ==
+					    YAFFS_OBJECT_TYPE_UNKNOWN) {
+						/* Set up as a directory */
+						parent->variant_type =
+						    YAFFS_OBJECT_TYPE_DIRECTORY;
+						INIT_LIST_HEAD(&parent->
+							       variant.dir_variant.children);
+					} else if (!parent
+						   || parent->variant_type !=
+						   YAFFS_OBJECT_TYPE_DIRECTORY) {
+						/* Hoosterman, another problem....
+						 * We're trying to use a non-directory as a directory
+						 */
+
+						yaffs_trace(YAFFS_TRACE_ERROR,
+							"yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+							);
+						parent = dev->lost_n_found;
+					}
+
+					yaffs_add_obj_to_dir(parent, in);
+
+					is_unlinked = (parent == dev->del_dir)
+					    || (parent == dev->unlinked_dir);
+
+					if (is_shrink) {
+						/* Mark the block as having a shrink header */
+						bi->has_shrink_hdr = 1;
+					}
+
+					/* Note re hardlinks.
+					 * Since we might scan a hardlink before its equivalent object is scanned
+					 * we put them all in a list.
+					 * After scanning is complete, we should have all the objects, so we run
+					 * through this list and fix up all the chains.
+					 */
+
+					switch (in->variant_type) {
+					case YAFFS_OBJECT_TYPE_UNKNOWN:
+						/* Todo got a problem */
+						break;
+					case YAFFS_OBJECT_TYPE_FILE:
+
+						if (in->variant.
+						    file_variant.scanned_size <
+						    file_size) {
+							/* This covers the case where the file size is greater
+							 * than where the data is.
+							 * This will happen if the file is resized to be larger
+							 * than its current data extents.
+							 */
+							in->variant.
+							    file_variant.
+							    file_size =
+							    file_size;
+							in->variant.
+							    file_variant.
+							    scanned_size =
+							    file_size;
+						}
+
+						if (in->variant.file_variant.
+						    shrink_size > file_size)
+							in->variant.
+							    file_variant.
+							    shrink_size =
+							    file_size;
+
+						break;
+					case YAFFS_OBJECT_TYPE_HARDLINK:
+						if (!is_unlinked) {
+							in->variant.
+							    hardlink_variant.
+							    equiv_id = equiv_id;
+							in->hard_links.next =
+							    (struct list_head *)
+							    hard_list;
+							hard_list = in;
+						}
+						break;
+					case YAFFS_OBJECT_TYPE_DIRECTORY:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SPECIAL:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SYMLINK:
+						if (oh) {
+							in->variant.
+							    symlink_variant.
+							    alias =
+							    yaffs_clone_str(oh->
+									    alias);
+							if (!in->variant.
+							    symlink_variant.
+							    alias)
+								alloc_failed =
+								    1;
+						}
+						break;
+					}
+
+				}
+
+			}
+
+		}		/* End of scanning for each chunk */
+
+		if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+			/* If we got this far while scanning, then the block is fully allocated. */
+			state = YAFFS_BLOCK_STATE_FULL;
+		}
+
+		bi->block_state = state;
+
+		/* Now let's see if it was dirty */
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+			yaffs_block_became_dirty(dev, blk);
+		}
+
+	}
+
+	yaffs_skip_rest_of_block(dev);
+
+	if (alt_block_index)
+		vfree(block_index);
+	else
+		kfree(block_index);
+
+	/* Ok, we've done all the scanning.
+	 * Fix up the hard link chains.
+	 * We should now have scanned all the objects, now it's time to add these
+	 * hardlinks.
+	 */
+	yaffs_link_fixup(dev, hard_list);
+
+	yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+	if (alloc_failed)
+		return YAFFS_FAIL;
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+	return YAFFS_OK;
+}
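
A note on the ordering above, summarised as a comment (a restatement of the
loop structure, not new behaviour):

/* Blocks are sorted by ascending sequence number and visited from the newest
 * down; chunks within each block are visited from the last chunk back to the
 * first. The most recently written copy of an object header or data chunk is
 * therefore always encountered first, and older copies seen later in the scan
 * are treated as superseded.
 */
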
diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
new file mode 100644
index 0000000..e1a9287
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+				   struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+				    struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
new file mode 100644
index 0000000..8183425
--- /dev/null
+++ b/fs/yaffs2/yportenv.h
@@ -0,0 +1,70 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_LINUX_H__
+#define __YPORTENV_LINUX_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x)     x
+
+#define YAFFS_LOSTNFOUND_NAME		"lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX		"obj"
+
+
+#define YAFFS_ROOT_MODE			0755
+#define YAFFS_LOSTNFOUND_MODE		0700
+
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+
+#define compile_time_assertion(assertion) \
+	({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+
+#ifndef Y_DUMP_STACK
+#define Y_DUMP_STACK() dump_stack()
+#endif
+
+#define yaffs_trace(msk, fmt, ...) do { \
+	if (yaffs_trace_mask & (msk)) \
+		printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while (0)
+
+#ifndef YBUG
+#define YBUG() do {\
+	yaffs_trace(YAFFS_TRACE_BUG,\
+		"bug " __FILE__ " %d",\
+		__LINE__);\
+	Y_DUMP_STACK();\
+} while (0)
+#endif
+
+#endif
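
Usage sketch for the trace macro above (yaffs_trace_mask is the global trace
control word declared elsewhere in the yaffs code):

/* Emits "yaffs: scanning block 42" at KERN_DEBUG level, but only when the
 * YAFFS_TRACE_SCAN bit is set in yaffs_trace_mask.
 */
yaffs_trace(YAFFS_TRACE_SCAN, "scanning block %d", 42);
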
diff --git a/include/linux/akm8975.h b/include/linux/akm8975.h
new file mode 100644
index 0000000..6a7c432
--- /dev/null
+++ b/include/linux/akm8975.h
@@ -0,0 +1,87 @@
+/*
+ * Definitions for akm8975 compass chip.
+ */
+#ifndef AKM8975_H
+#define AKM8975_H
+
+#include <linux/ioctl.h>
+
+/*! \name AK8975 operation mode
+ \anchor AK8975_Mode
+ Defines an operation mode of the AK8975.*/
+/*! @{*/
+#define AK8975_MODE_SNG_MEASURE   0x01
+#define	AK8975_MODE_SELF_TEST     0x08
+#define	AK8975_MODE_FUSE_ACCESS   0x0F
+#define	AK8975_MODE_POWER_DOWN    0x00
+/*! @}*/
+
+#define RBUFF_SIZE		8	/* Rx buffer size */
+
+/*! \name AK8975 register address
+\anchor AK8975_REG
+Defines a register address of the AK8975.*/
+/*! @{*/
+#define AK8975_REG_WIA		0x00
+#define AK8975_REG_INFO		0x01
+#define AK8975_REG_ST1		0x02
+#define AK8975_REG_HXL		0x03
+#define AK8975_REG_HXH		0x04
+#define AK8975_REG_HYL		0x05
+#define AK8975_REG_HYH		0x06
+#define AK8975_REG_HZL		0x07
+#define AK8975_REG_HZH		0x08
+#define AK8975_REG_ST2		0x09
+#define AK8975_REG_CNTL		0x0A
+#define AK8975_REG_RSV		0x0B
+#define AK8975_REG_ASTC		0x0C
+#define AK8975_REG_TS1		0x0D
+#define AK8975_REG_TS2		0x0E
+#define AK8975_REG_I2CDIS	0x0F
+/*! @}*/
+
+/*! \name AK8975 fuse-rom address
+\anchor AK8975_FUSE
+Defines a read-only address of the fuse ROM of the AK8975.*/
+/*! @{*/
+#define AK8975_FUSE_ASAX	0x10
+#define AK8975_FUSE_ASAY	0x11
+#define AK8975_FUSE_ASAZ	0x12
+/*! @}*/
+
+#define AKMIO                   0xA1
+
+/* IOCTLs for AKM library */
+#define ECS_IOCTL_WRITE                 _IOW(AKMIO, 0x02, char[5])
+#define ECS_IOCTL_READ                  _IOWR(AKMIO, 0x03, char[5])
+#define ECS_IOCTL_GETDATA               _IOR(AKMIO, 0x08, char[RBUFF_SIZE])
+#define ECS_IOCTL_SET_YPR               _IOW(AKMIO, 0x0C, short[12])
+#define ECS_IOCTL_GET_OPEN_STATUS       _IOR(AKMIO, 0x0D, int)
+#define ECS_IOCTL_GET_CLOSE_STATUS      _IOR(AKMIO, 0x0E, int)
+#define ECS_IOCTL_GET_DELAY             _IOR(AKMIO, 0x30, short)
+
+/* IOCTLs for APPs */
+#define ECS_IOCTL_APP_SET_MFLAG		_IOW(AKMIO, 0x11, short)
+#define ECS_IOCTL_APP_GET_MFLAG		_IOW(AKMIO, 0x12, short)
+#define ECS_IOCTL_APP_SET_AFLAG		_IOW(AKMIO, 0x13, short)
+#define ECS_IOCTL_APP_GET_AFLAG		_IOR(AKMIO, 0x14, short)
+#define ECS_IOCTL_APP_SET_DELAY		_IOW(AKMIO, 0x18, short)
+#define ECS_IOCTL_APP_GET_DELAY		ECS_IOCTL_GET_DELAY
+/* Set raw magnetic vector flag */
+#define ECS_IOCTL_APP_SET_MVFLAG	_IOW(AKMIO, 0x19, short)
+/* Get raw magnetic vector flag */
+#define ECS_IOCTL_APP_GET_MVFLAG	_IOR(AKMIO, 0x1A, short)
+#define ECS_IOCTL_APP_SET_TFLAG         _IOR(AKMIO, 0x15, short)
+
+
+struct akm8975_platform_data {
+	int intr;
+
+	int (*init)(void);
+	void (*exit)(void);
+	int (*power_on)(void);
+	int (*power_off)(void);
+};
+
+#endif
+
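
A minimal user-space sketch of the ioctl interface above; the device node name
is hypothetical, since it is chosen by the driver rather than this header:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/akm8975.h>

int main(void)
{
	char raw[RBUFF_SIZE];
	int fd = open("/dev/akm8975", O_RDONLY);	/* hypothetical node name */

	if (fd < 0)
		return 1;
	/* Fetch one raw measurement block from the driver. */
	if (ioctl(fd, ECS_IOCTL_GETDATA, raw) == 0)
		printf("first byte: 0x%02x\n", raw[0] & 0xff);
	close(fd);
	return 0;
}
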
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 2111481..60c737f 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -5,6 +5,15 @@
 #define AMBA_MMCI_H
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+        struct sdio_cis cis;
+        struct sdio_cccr cccr;
+        struct sdio_embedded_func *funcs;
+        int num_funcs;
+};
 
 /* Just some dummy forwarding */
 struct dma_chan;
@@ -55,6 +64,9 @@
 	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
 	void *dma_rx_param;
 	void *dma_tx_param;
+	unsigned int status_irq;
+	struct embedded_sdio_data *embedded_sdio;
+	int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
 };
 
 #endif
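
A hypothetical board-file sketch showing how the new mmci_platform_data fields
might be wired up; every name and value below is made up for illustration:

#include <linux/amba/mmci.h>

/* A real board would store the callback and invoke it on card-detect changes;
 * this stub only illustrates the expected signature.
 */
static int board_wifi_status_register(void (*callback)(int card_present,
							void *dev_id),
				      void *dev_id)
{
	return 0;
}

static struct embedded_sdio_data board_wifi_sdio = {
	.cccr = {
		.multi_block	= 1,
		.wide_bus	= 1,
	},
	.num_funcs = 0,
};

static struct mmci_platform_data board_wifi_mmc = {
	.embedded_sdio		= &board_wifi_sdio,
	.register_status_notify	= board_wifi_status_register,
};
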
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644
index 0000000..7f16a14
--- /dev/null
+++ b/include/linux/android_aid.h
@@ -0,0 +1,26 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_NET_BT_ADMIN 3001
+#define AID_NET_BT       3002
+#define AID_INET         3003
+#define AID_NET_RAW      3004
+#define AID_NET_ADMIN    3005
+
+#endif
diff --git a/include/linux/android_alarm.h b/include/linux/android_alarm.h
new file mode 100644
index 0000000..f8f14e7
--- /dev/null
+++ b/include/linux/android_alarm.h
@@ -0,0 +1,106 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+	/* return code bit numbers or set alarm arg */
+	ANDROID_ALARM_RTC_WAKEUP,
+	ANDROID_ALARM_RTC,
+	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+	ANDROID_ALARM_ELAPSED_REALTIME,
+	ANDROID_ALARM_SYSTEMTIME,
+
+	ANDROID_ALARM_TYPE_COUNT,
+
+	/* return code bit numbers */
+	/* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+
+/*
+ * The alarm interface is similar to the hrtimer interface but adds support
+ * for wakeup from suspend. It also adds an elapsed realtime clock that can
+ * be used for periodic timers that need to keep running while the system is
+ * suspended and not be disrupted when the wall time is set.
+ */
+
+/**
+ * struct alarm - the basic alarm structure
+ * @node:	red black tree node for time ordered insertion
+ * @type:	alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
+ * @softexpires: the absolute earliest expiry time of the alarm.
+ * @expires:	the absolute expiry time.
+ * @function:	alarm expiry callback function
+ *
+ * The alarm structure must be initialized by alarm_init()
+ *
+ */
+
+struct alarm {
+	struct rb_node 		node;
+	enum android_alarm_type type;
+	ktime_t			softexpires;
+	ktime_t			expires;
+	void			(*function)(struct alarm *);
+};
+
+void alarm_init(struct alarm *alarm,
+	enum android_alarm_type type, void (*function)(struct alarm *));
+void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end);
+int alarm_try_to_cancel(struct alarm *alarm);
+int alarm_cancel(struct alarm *alarm);
+ktime_t alarm_get_elapsed_realtime(void);
+
+/* set rtc while preserving elapsed realtime */
+int alarm_set_rtc(const struct timespec ts);
+
+#endif
+
+enum android_alarm_return_flags {
+	ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+	ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+				1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+	ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+				1U << ANDROID_ALARM_ELAPSED_REALTIME,
+	ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+	ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT                  _IO('a', 1)
+
+#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
+
+#endif
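
To make the in-kernel half of this interface concrete, here is a minimal sketch of a driver arming a wakeup-capable elapsed-realtime alarm with the functions declared above; the ten-second timeout and the callback body are illustrative only.

#include <linux/android_alarm.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

static struct alarm my_alarm;

static void my_alarm_fired(struct alarm *alrm)
{
	pr_info("elapsed-realtime alarm fired\n");
}

static void my_alarm_arm(void)
{
	ktime_t expires;

	alarm_init(&my_alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
		   my_alarm_fired);
	/* fire about 10 seconds from now on the elapsed-realtime clock,
	 * so the alarm survives suspend and wall-clock changes */
	expires = ktime_add(alarm_get_elapsed_realtime(), ktime_set(10, 0));
	alarm_start_range(&my_alarm, expires, expires);
}
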
diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h
new file mode 100644
index 0000000..f633621
--- /dev/null
+++ b/include/linux/android_pmem.h
@@ -0,0 +1,93 @@
+/* include/linux/android_pmem.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of 
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ANDROID_PMEM_H_
+#define _ANDROID_PMEM_H_
+
+#define PMEM_IOCTL_MAGIC 'p'
+#define PMEM_GET_PHYS		_IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
+#define PMEM_MAP		_IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
+#define PMEM_GET_SIZE		_IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
+#define PMEM_UNMAP		_IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
+/* This ioctl allocates pmem space backing the file; it fails if the file
+ * already has an allocation.  Pass the length as the argument to the
+ * ioctl. */
+#define PMEM_ALLOCATE		_IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
+/* This connects one pmem file to another; pass the file that is already
+ * backed in memory as the argument to the ioctl
+ */
+#define PMEM_CONNECT		_IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
+/* Returns the total size of the pmem region it is sent to as a pmem_region
+ * struct (with offset set to 0). 
+ */
+#define PMEM_GET_TOTAL_SIZE	_IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
+#define PMEM_CACHE_FLUSH	_IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
+
+struct android_pmem_platform_data
+{
+	const char* name;
+	/* starting physical address of memory region */
+	unsigned long start;
+	/* size of memory region */
+	unsigned long size;
+	/* set to indicate the region should not be managed with an allocator */
+	unsigned no_allocator;
+	/* set to indicate maps of this region should be cached; if a mix of
+	 * cached and uncached is desired, set this and open the device with
+	 * O_SYNC to get an uncached region */
+	unsigned cached;
+	/* The MSM7k has bits to enable a write buffer in the bus controller*/
+	unsigned buffered;
+};
+
+struct pmem_region {
+	unsigned long offset;
+	unsigned long len;
+};
+
+#ifdef CONFIG_ANDROID_PMEM
+int is_pmem_file(struct file *file);
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+		  unsigned long *end, struct file **filp);
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+		       unsigned long *end);
+void put_pmem_file(struct file* file);
+void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
+int pmem_setup(struct android_pmem_platform_data *pdata,
+	       long (*ioctl)(struct file *, unsigned int, unsigned long),
+	       int (*release)(struct inode *, struct file *));
+int pmem_remap(struct pmem_region *region, struct file *file,
+	       unsigned operation);
+
+#else
+static inline int is_pmem_file(struct file *file) { return 0; }
+static inline int get_pmem_file(int fd, unsigned long *start,
+				unsigned long *vstart, unsigned long *end,
+				struct file **filp) { return -ENOSYS; }
+static inline int get_pmem_user_addr(struct file *file, unsigned long *start,
+				     unsigned long *end) { return -ENOSYS; }
+static inline void put_pmem_file(struct file* file) { return; }
+static inline void flush_pmem_file(struct file *file, unsigned long start,
+				   unsigned long len) { return; }
+static inline int pmem_setup(struct android_pmem_platform_data *pdata,
+	      long (*ioctl)(struct file *, unsigned int, unsigned long),
+	      int (*release)(struct inode *, struct file *)) { return -ENOSYS; }
+
+static inline int pmem_remap(struct pmem_region *region, struct file *file,
+			     unsigned operation) { return -ENOSYS; }
+#endif
+
+#endif /* _ANDROID_PMEM_H_ */
+
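
A userspace sketch of the ioctl flow above: query the size of a pmem region and map it. The /dev/pmem node name is an assumption; the platform data above decides which pmem devices exist and how they may be mapped.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android_pmem.h>

int main(void)
{
	struct pmem_region region;
	void *base;
	int fd = open("/dev/pmem", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	/* fills in a pmem_region with offset 0 and the region's total size */
	if (ioctl(fd, PMEM_GET_TOTAL_SIZE, &region) < 0) {
		perror("PMEM_GET_TOTAL_SIZE");
		close(fd);
		return 1;
	}
	base = mmap(NULL, region.len, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	if (base != MAP_FAILED) {
		printf("mapped %lu bytes of pmem\n", region.len);
		munmap(base, region.len);
	}
	close(fd);
	return 0;
}
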
diff --git a/include/linux/ashmem.h b/include/linux/ashmem.h
new file mode 100644
index 0000000..1976b10
--- /dev/null
+++ b/include/linux/ashmem.h
@@ -0,0 +1,48 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN		256
+
+#define ASHMEM_NAME_DEF		"dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED	0
+#define ASHMEM_WAS_PURGED	1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED	0
+#define ASHMEM_IS_PINNED	1
+
+struct ashmem_pin {
+	__u32 offset;	/* offset into region, in bytes, page-aligned */
+	__u32 len;	/* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC		0x77
+
+#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10)
+
+#endif	/* _LINUX_ASHMEM_H */
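
As a sketch of the pin/unpin protocol these ioctls describe (not a complete allocator), a userspace client might do the following; /dev/ashmem is the conventional node name, and the size is assumed to be page-aligned.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>

static void *ashmem_region(size_t size)	/* size assumed page-aligned */
{
	struct ashmem_pin pin = { .offset = 0, .len = (__u32)size };
	void *base;
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0)
		return NULL;
	ioctl(fd, ASHMEM_SET_NAME, "example-region");
	ioctl(fd, ASHMEM_SET_SIZE, size);
	base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		close(fd);
		return NULL;
	}
	/* regions start pinned; unpinning lets the kernel purge the pages
	 * under memory pressure, and re-pinning reports whether that happened */
	ioctl(fd, ASHMEM_UNPIN, &pin);
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		memset(base, 0, size);	/* contents were discarded */
	return base;	/* fd stays open for the lifetime of the mapping */
}
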
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ab4ac0c..0c34e9c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -84,12 +84,6 @@
 	CSS_REMOVED, /* This CSS is dead */
 };
 
-/* Caller must verify that the css is not for root cgroup */
-static inline void __css_get(struct cgroup_subsys_state *css, int count)
-{
-	atomic_add(count, &css->refcnt);
-}
-
 /*
  * Call css_get() to hold a reference on the css; it can be used
  * for a reference obtained via:
@@ -97,6 +91,7 @@
  * - task->cgroups for a locked task
  */
 
+extern void __css_get(struct cgroup_subsys_state *css, int count);
 static inline void css_get(struct cgroup_subsys_state *css)
 {
 	/* We don't need to reference count the root state */
@@ -143,10 +138,7 @@
 enum {
 	/* Control Group is dead */
 	CGRP_REMOVED,
-	/*
-	 * Control Group has previously had a child cgroup or a task,
-	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
-	 */
+	/* Control Group has ever had a child cgroup or a task */
 	CGRP_RELEASABLE,
 	/* Control Group requires release notifications to userspace */
 	CGRP_NOTIFY_ON_RELEASE,
@@ -287,6 +279,7 @@
 
 	/* For RCU-protected deletion */
 	struct rcu_head rcu_head;
+	struct work_struct work;
 };
 
 /*
diff --git a/include/linux/cpuacct.h b/include/linux/cpuacct.h
new file mode 100644
index 0000000..8f68e73
--- /dev/null
+++ b/include/linux/cpuacct.h
@@ -0,0 +1,43 @@
+/* include/linux/cpuacct.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _CPUACCT_H_
+#define _CPUACCT_H_
+
+#include <linux/cgroup.h>
+
+#ifdef CONFIG_CGROUP_CPUACCT
+
+/*
+ * Platform specific CPU frequency hooks for cpuacct. These functions are
+ * called from the scheduler.
+ */
+struct cpuacct_charge_calls {
+	/*
+	 * Platforms can take advantage of this data and use
+	 * per-cpu allocations if necessary.
+	 */
+	void (*init) (void **cpuacct_data);
+	void (*charge) (void *cpuacct_data,  u64 cputime, unsigned int cpu);
+	void (*cpufreq_show) (void *cpuacct_data, struct cgroup_map_cb *cb);
+	/* Returns power consumed in milliWatt seconds */
+	u64 (*power_usage) (void *cpuacct_data);
+};
+
+int cpuacct_charge_register(struct cpuacct_charge_calls *fn);
+
+#endif /* CONFIG_CGROUP_CPUACCT */
+
+#endif // _CPUACCT_H_
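
A platform-side sketch of how these hooks might be wired up. The per-cpu data layout and the single "total" key are illustrative; a real platform would bucket the charged time per frequency and feed cpuacct.cpufreq/cpuacct.power from its own tables.

#include <linux/cpuacct.h>
#include <linux/init.h>
#include <linux/slab.h>

struct my_acct { u64 total_ns; };

static void my_acct_init(void **cpuacct_data)
{
	*cpuacct_data = kzalloc(sizeof(struct my_acct), GFP_KERNEL);
}

static void my_acct_charge(void *cpuacct_data, u64 cputime, unsigned int cpu)
{
	struct my_acct *acct = cpuacct_data;

	if (acct)
		acct->total_ns += cputime;	/* would be bucketed per frequency */
}

static void my_acct_cpufreq_show(void *cpuacct_data, struct cgroup_map_cb *cb)
{
	struct my_acct *acct = cpuacct_data;

	if (acct)
		cb->fill(cb, "total", acct->total_ns);
}

static u64 my_acct_power_usage(void *cpuacct_data)
{
	return 0;	/* platform power model goes here */
}

static struct cpuacct_charge_calls my_acct_calls = {
	.init		= my_acct_init,
	.charge		= my_acct_charge,
	.cpufreq_show	= my_acct_cpufreq_show,
	.power_usage	= my_acct_power_usage,
};

static int __init my_acct_register(void)
{
	return cpuacct_charge_register(&my_acct_calls);
}
late_initcall(my_acct_register);
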
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 11be48e..ae06dc9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -358,6 +358,9 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactive)
 #endif
 
 
diff --git a/include/linux/earlysuspend.h b/include/linux/earlysuspend.h
new file mode 100755
index 0000000..8343b81
--- /dev/null
+++ b/include/linux/earlysuspend.h
@@ -0,0 +1,56 @@
+/* include/linux/earlysuspend.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_EARLYSUSPEND_H
+#define _LINUX_EARLYSUSPEND_H
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/list.h>
+#endif
+
+/* The early_suspend structure defines suspend and resume hooks to be called
+ * when the user visible sleep state of the system changes, and a level to
+ * control the order. They can be used to turn off the screen and input
+ * devices that are not used for wakeup.
+ * Suspend handlers are called in low to high level order, resume handlers are
+ * called in the opposite order. If, when calling register_early_suspend,
+ * the suspend handlers have already been called without a matching call to the
+ * resume handlers, the suspend handler will be called directly from
+ * register_early_suspend. This direct call can violate the normal level order.
+ */
+enum {
+	EARLY_SUSPEND_LEVEL_BLANK_SCREEN = 50,
+	EARLY_SUSPEND_LEVEL_STOP_DRAWING = 100,
+	EARLY_SUSPEND_LEVEL_DISABLE_FB = 150,
+};
+struct early_suspend {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct list_head link;
+	int level;
+	void (*suspend)(struct early_suspend *h);
+	void (*resume)(struct early_suspend *h);
+#endif
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void register_early_suspend(struct early_suspend *handler);
+void unregister_early_suspend(struct early_suspend *handler);
+#else
+#define register_early_suspend(handler) do { } while (0)
+#define unregister_early_suspend(handler) do { } while (0)
+#endif
+
+#endif
+
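
A minimal driver-side sketch, assuming CONFIG_HAS_EARLYSUSPEND=y (otherwise the struct has no members and the register calls compile away): blank a panel when the screen goes off and restore it on late resume. The panel callbacks are placeholders.

#include <linux/earlysuspend.h>

static void my_panel_early_suspend(struct early_suspend *h)
{
	/* screen is going off: stop refreshing and blank the panel */
}

static void my_panel_late_resume(struct early_suspend *h)
{
	/* screen is coming back: reprogram and unblank the panel */
}

static struct early_suspend my_panel_es = {
	.level	 = EARLY_SUSPEND_LEVEL_BLANK_SCREEN,
	.suspend = my_panel_early_suspend,
	.resume	 = my_panel_late_resume,
};

static void my_panel_init_es(void)
{
	register_early_suspend(&my_panel_es);
}
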
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644
index 0000000..360b4dd
--- /dev/null
+++ b/include/linux/gpio_event.h
@@ -0,0 +1,169 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+	int count;
+	struct input_dev *dev[];
+};
+enum {
+	GPIO_EVENT_FUNC_UNINIT  = 0x0,
+	GPIO_EVENT_FUNC_INIT    = 0x1,
+	GPIO_EVENT_FUNC_SUSPEND = 0x2,
+	GPIO_EVENT_FUNC_RESUME  = 0x3,
+};
+struct gpio_event_info {
+	int (*func)(struct gpio_event_input_devs *input_devs,
+		    struct gpio_event_info *info,
+		    void **data, int func);
+	int (*event)(struct gpio_event_input_devs *input_devs,
+		     struct gpio_event_info *info,
+		     void **data, unsigned int dev, unsigned int type,
+		     unsigned int code, int value); /* out events */
+	bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+	const char *name;
+	struct gpio_event_info **info;
+	size_t info_count;
+	int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+	const char *names[]; /* If name is NULL, names contain a NULL */
+			     /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+	/* unset: drive active output low, set: drive active output high */
+	GPIOKPF_ACTIVE_HIGH              = 1U << 0,
+	GPIOKPF_DEBOUNCE                 = 1U << 1,
+	GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+	GPIOKPF_REMOVE_PHANTOM_KEYS      = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+					   GPIOKPF_DEBOUNCE,
+	GPIOKPF_DRIVE_INACTIVE           = 1U << 3,
+	GPIOKPF_LEVEL_TRIGGERED_IRQ      = 1U << 4,
+	GPIOKPF_PRINT_UNMAPPED_KEYS      = 1U << 16,
+	GPIOKPF_PRINT_MAPPED_KEYS        = 1U << 17,
+	GPIOKPF_PRINT_PHANTOM_KEYS       = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+	(((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+	/* initialize to gpio_event_matrix_func */
+	struct gpio_event_info info;
+	/* size must be ninputs * noutputs */
+	const unsigned short *keymap;
+	unsigned int *input_gpios;
+	unsigned int *output_gpios;
+	unsigned int ninputs;
+	unsigned int noutputs;
+	/* time to wait before reading inputs after driving each output */
+	ktime_t settle_time;
+	/* time to wait before scanning the keypad a second time */
+	ktime_t debounce_delay;
+	ktime_t poll_time;
+	unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+	GPIOEDF_ACTIVE_HIGH         = 1U << 0,
+/*	GPIOEDF_USE_DOWN_IRQ        = 1U << 1, */
+/*	GPIOEDF_USE_IRQ             = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+	GPIOEDF_PRINT_KEYS          = 1U << 8,
+	GPIOEDF_PRINT_KEY_DEBOUNCE  = 1U << 9,
+};
+
+struct gpio_event_direct_entry {
+	uint32_t gpio:16;
+	uint32_t code:10;
+	uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+	/* initialize to gpio_event_input_func */
+	struct gpio_event_info info;
+	ktime_t debounce_time;
+	ktime_t poll_time;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data,
+			unsigned int dev, unsigned int type,
+			unsigned int code, int value);
+struct gpio_event_output_info {
+	/* initialize to gpio_event_output_func and gpio_event_output_event */
+	struct gpio_event_info info;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+	GPIOEAF_PRINT_UNKNOWN_DIRECTION  = 1U << 16,
+	GPIOEAF_PRINT_RAW                = 1U << 17,
+	GPIOEAF_PRINT_EVENT              = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+	/* initialize to gpio_event_axis_func */
+	struct gpio_event_info info;
+	uint8_t  count; /* number of gpios for this axis */
+	uint8_t  dev; /* device index when using multiple input devices */
+	uint8_t  type; /* EV_REL or EV_ABS */
+	uint16_t code;
+	uint16_t decoded_size;
+	uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+	uint32_t *gpio;
+	uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
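
To show how a board file typically feeds this driver, here is a sketch of a 2x2 key matrix; every GPIO number, keycode, and timing value below is invented for illustration, and the keymap ordering must match the driver's ninputs * noutputs indexing.

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/platform_device.h>

static unsigned int board_row_gpios[] = { 40, 41 };	/* driven outputs */
static unsigned int board_col_gpios[] = { 42, 43 };	/* sensed inputs */

static const unsigned short board_keymap[] = {
	/* ninputs * noutputs entries */
	KEY_VOLUMEUP, KEY_VOLUMEDOWN,
	KEY_HOME,     KEY_BACK,
};

static struct gpio_event_matrix_info board_matrix_info = {
	.info.func	= gpio_event_matrix_func,
	.keymap		= board_keymap,
	.output_gpios	= board_row_gpios,
	.input_gpios	= board_col_gpios,
	.noutputs	= ARRAY_SIZE(board_row_gpios),
	.ninputs	= ARRAY_SIZE(board_col_gpios),
	.settle_time.tv64	= 40 * NSEC_PER_USEC,
	.debounce_delay.tv64	= 5 * NSEC_PER_MSEC,
	.poll_time.tv64		= 20 * NSEC_PER_MSEC,
	.flags		= GPIOKPF_LEVEL_TRIGGERED_IRQ |
			  GPIOKPF_REMOVE_PHANTOM_KEYS |
			  GPIOKPF_PRINT_UNMAPPED_KEYS,
};

static struct gpio_event_info *board_keypad_info[] = {
	&board_matrix_info.info,
};

static struct gpio_event_platform_data board_keypad_data = {
	.name		= "board-keypad",
	.info		= board_keypad_info,
	.info_count	= ARRAY_SIZE(board_keypad_info),
};

static struct platform_device board_keypad_device = {
	.name	= GPIO_EVENT_DEV_NAME,
	.id	= 0,
	.dev	= { .platform_data = &board_keypad_data },
};
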
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644
index 0000000..c06bd6c
--- /dev/null
+++ b/include/linux/if_pppolac.h
@@ -0,0 +1,33 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OLAC */
+	int		udp_socket;
+	struct __attribute__((packed)) {
+		__u16	tunnel, session;
+	} local, remote;
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644
index 0000000..0cf34b4
--- /dev/null
+++ b/include/linux/if_pppopns.h
@@ -0,0 +1,32 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OPNS */
+	int		tcp_socket;
+	__u16		local;
+	__u16		remote;
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOPNS_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 397921b..999ccd3 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -27,6 +27,8 @@
 #include <linux/ppp_channel.h>
 #endif /* __KERNEL__ */
 #include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
+#include <linux/if_pppopns.h>
 
 /* For user-space programs to pick up these definitions
  * which they wouldn't get otherwise without defining __KERNEL__
@@ -60,7 +62,9 @@
 #define PX_PROTO_OE    0 /* Currently just PPPoE */
 #define PX_PROTO_OL2TP 1 /* Now L2TP also */
 #define PX_PROTO_PPTP  2
-#define PX_MAX_PROTO   3
+#define PX_PROTO_OLAC  3
+#define PX_PROTO_OPNS  4
+#define PX_MAX_PROTO   5
 
 struct sockaddr_pppox {
 	sa_family_t     sa_family;            /* address family, AF_PPPOX */
@@ -167,6 +171,25 @@
 	u32 seq_sent, seq_recv;
 	int ppp_flags;
 };
+
+struct pppolac_opt {
+	__u32		local;
+	__u32		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	atomic_t	sequencing;
+	int		(*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+	__u16		local;
+	__u16		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	void		(*data_ready)(struct sock *sk_raw, int length);
+	int		(*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
 #include <net/sock.h>
 
 struct pppox_sock {
@@ -177,6 +200,8 @@
 	union {
 		struct pppoe_opt pppoe;
 		struct pptp_opt  pptp;
+		struct pppolac_opt lac;
+		struct pppopns_opt pns;
 	} proto;
 	__be16			num;
 };
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 0000000..111982f
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,339 @@
+/*
+ * include/linux/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+struct ion_handle;
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ * 				 carveout heap, allocations are physically
+ * 				 contiguous
+ * @ION_NUM_HEAPS:		 helper for iterating over heaps
+ */
+enum ion_heap_type {
+	ION_HEAP_TYPE_SYSTEM,
+	ION_HEAP_TYPE_SYSTEM_CONTIG,
+	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+				 are at the end of this enum */
+	ION_NUM_HEAPS,
+};
+
+#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+
+#ifdef __KERNEL__
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+   plumbed in the kernel, and all instances of ion_phys_addr_t should
+   be converted to phys_addr_t.  For the time being many kernel interfaces
+   do not accept phys_addr_t's that would have to */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type:	type of the heap from ion_heap_type enum
+ * @id:		unique identifier for the heap; when allocating, heaps with
+ *		lower id numbers are allocated from first
+ * @name:	used for debug purposes
+ * @base:	base address of heap in physical memory if applicable
+ * @size:	size of the heap in bytes if applicable
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+	enum ion_heap_type type;
+	unsigned int id;
+	const char *name;
+	ion_phys_addr_t base;
+	size_t size;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr:		number of structures in the array
+ * @heaps:	array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+	int nr;
+	struct ion_platform_heap heaps[];
+};
+
+/**
+ * ion_client_create() -  allocates a client and returns it
+ * @dev:	the global ion device
+ * @heap_mask:	mask of heaps this client can allocate from
+ * @name:	used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     unsigned int heap_mask, const char *name);
+
+/**
+ * ion_client_destroy() -  frees a client and all its handles
+ * @client:	the client
+ *
+ * Free the provided client and all its resources, including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:	the client
+ * @len:	size of the allocation
+ * @align:	requested allocation alignment, lots of hardware blocks have
+ *		alignment requirements of some kind
+ * @flags:	mask of heaps to allocate from, if multiple bits are set
+ *		heaps will be tried in order from lowest to highest order bit
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client:	the client
+ * @handle:	the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client:	the client
+ * @handle:	the handle
+ * @addr:	a pointer to put the address in
+ * @len:	a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_map_dma should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:	the client
+ * @handle:	handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_map_dma - create a dma mapping for a given handle
+ * @client:	the client
+ * @handle:	handle to map
+ *
+ * Return an sglist describing the given handle
+ */
+struct scatterlist *ion_map_dma(struct ion_client *client,
+				struct ion_handle *handle);
+
+/**
+ * ion_unmap_dma() - destroy a dma mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share() - given a handle, obtain a buffer to pass to other clients
+ * @client:	the client
+ * @handle:	the handle to share
+ *
+ * Given a handle, return a buffer which exists in a global name
+ * space and can be passed to other clients.  Should be passed into ion_import
+ * to obtain a new handle for this buffer.
+ */
+struct ion_buffer *ion_share(struct ion_client *client,
+			     struct ion_handle *handle);
+
+/**
+ * ion_import() - given an buffer in another client, import it
+ * @client:	the client to import the buffer into
+ * @buffer:	the buffer to import (as obtained from ion_share)
+ *
+ * Given a buffer, add it to the client and return the handle to use to refer
+ * to it further.  This is called to share a handle from one kernel client to
+ * another.
+ */
+struct ion_handle *ion_import(struct ion_client *client,
+			      struct ion_buffer *buffer);
+
+/**
+ * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
+ * @client:	the client to import the buffer into
+ * @fd:		the fd
+ *
+ * A helper function for drivers that will be receiving ion buffers shared
+ * with them from userspace.  These buffers are represented by a file
+ * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
+ * This function converts that fd into the underlying buffer, and returns
+ * the handle to use to refer to it further.
+ */
+struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
+#endif /* __KERNEL__ */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * Create a client by opening /dev/ion; most operations are then handled
+ * via the following ioctls.
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:	size of the allocation
+ * @align:	required alignment of the allocation
+ * @flags:	flags passed to heap
+ * @handle:	pointer that will be populated with a cookie to use to refer
+ *		to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+	size_t len;
+	size_t align;
+	unsigned int flags;
+	struct ion_handle *handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:	a handle
+ * @fd:		a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+	struct ion_handle *handle;
+	int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:	a handle
+ */
+struct ion_handle_data {
+	struct ion_handle *handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+	unsigned int cmd;
+	unsigned long arg;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+				      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * filed set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, int)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _LINUX_ION_H */
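
Tying the ioctls above together, a userspace allocation might look like this sketch; the heap mask, alignment, and error handling are illustrative only, and the caller is expected to check the mapping and clean up the fds and handle later.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

static int ion_example(size_t len, void **out_base)
{
	struct ion_allocation_data alloc = {
		.len   = len,
		.align = 4096,
		.flags = ION_HEAP_SYSTEM_MASK,	/* try the vmalloc heap */
	};
	struct ion_fd_data map;
	int ion_fd = open("/dev/ion", O_RDONLY);

	if (ion_fd < 0)
		return -1;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto err;
	map.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_MAP, &map) < 0)
		goto err;
	*out_base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			 map.fd, 0);	/* caller checks for MAP_FAILED */
	/* the caller later munmap()s, closes map.fd, ION_IOC_FREEs the
	 * handle, and closes ion_fd (which destroys the client) */
	return ion_fd;
err:
	close(ion_fd);
	return -1;
}
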
diff --git a/include/linux/kernel_debugger.h b/include/linux/kernel_debugger.h
new file mode 100644
index 0000000..b4dbfe9
--- /dev/null
+++ b/include/linux/kernel_debugger.h
@@ -0,0 +1,41 @@
+/*
+ * include/linux/kernel_debugger.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_KERNEL_DEBUGGER_H_
+#define _LINUX_KERNEL_DEBUGGER_H_
+
+struct kdbg_ctxt {
+	int (*printf)(void *cookie, const char *fmt, ...);
+	void *cookie;
+};
+
+/* kernel_debugger() is called from IRQ context and should
+ * use the kdbg_ctxt.printf to write output (do NOT call
+ * printk, do operations not safe from IRQ context, etc).
+ *
+ * kdbg_ctxt.printf will return -1 if there is not enough
+ * buffer space or if you are being aborted.  In this case
+ * you must return as soon as possible.
+ *
+ * Return non-zero if more data is available -- for example, if buffer
+ * space ran out and you had to stop, but could have printed more.
+ *
+ * Additional calls where cmd is "more" will be made if
+ * the additional data is desired.
+ */
+int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd);
+
+#endif
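
This header only declares the hook; the sketch below is not the in-tree implementation, just an illustration of the calling contract described above (answer a command via ctxt->printf, stop as soon as it returns -1, and return non-zero if output remains).

#include <linux/kernel_debugger.h>
#include <linux/string.h>

int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd)
{
	if (strcmp(cmd, "hello") == 0 || strcmp(cmd, "more") == 0) {
		/* printf returns -1 when the buffer is full or the caller
		 * is aborting us; return immediately in that case */
		if (ctxt->printf(ctxt->cookie, "hello from the debugger\n") < 0)
			return 1;	/* output was still pending */
	}
	return 0;
}
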
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644
index 0000000..856a585
--- /dev/null
+++ b/include/linux/keychord.h
@@ -0,0 +1,52 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION		1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed.  A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+	/* should be KEYCHORD_VERSION */
+	__u16 version;
+	/*
+	 * client specified ID, returned from read()
+	 * when this keychord is pressed.
+	 */
+	__u16 id;
+
+	/* number of keycodes in this keychord */
+	__u16 count;
+
+	/* variable length array of keycodes */
+	__u16 keycodes[];
+};
+
+#endif	/* __LINUX_KEYCHORD_H_ */
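
A userspace sketch of the protocol this comment describes: register one chord on /dev/keychord and block in read() until it is pressed. The chord id is assumed to be read back as a __u16, matching the id field above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>
#include <linux/keychord.h>

int main(void)
{
	char buf[sizeof(struct input_keychord) + 2 * sizeof(__u16)];
	struct input_keychord *kc = (struct input_keychord *)buf;
	__u16 id;
	int fd = open("/dev/keychord", O_RDWR);

	if (fd < 0)
		return 1;
	kc->version = KEYCHORD_VERSION;
	kc->id = 42;			/* echoed back by read() */
	kc->count = 2;
	kc->keycodes[0] = KEY_VOLUMEDOWN;
	kc->keycodes[1] = KEY_POWER;
	if (write(fd, buf, sizeof(buf)) != sizeof(buf))
		return 1;
	if (read(fd, &id, sizeof(id)) == sizeof(id))	/* blocks until pressed */
		printf("keychord %u pressed\n", id);
	close(fd);
	return 0;
}
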
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644
index 0000000..a2ac49e
--- /dev/null
+++ b/include/linux/keyreset.h
@@ -0,0 +1,28 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+	int (*reset_fn)(void);
+	int *keys_up;
+	int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
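
A board-file sketch of how this platform data is usually provided; the keycodes are illustrative, and keys_up/reset_fn are simply left unset here.

#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/platform_device.h>

static struct keyreset_platform_data board_keyreset_data = {
	.keys_down = {
		KEY_POWER,
		KEY_VOLUMEDOWN,
		0,		/* list is 0-terminated */
	},
};

static struct platform_device board_keyreset_device = {
	.name	= KEYRESET_NAME,
	.dev	= { .platform_data = &board_keyreset_data },
};
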
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9670f71..de20025 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -871,6 +871,7 @@
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
 int shmem_zero_setup(struct vm_area_struct *);
 
 extern int can_do_mlock(void);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1ee4424..1584b52 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -265,6 +265,10 @@
 	const struct mmc_bus_ops *bus_ops;	/* current bus driver */
 	unsigned int		bus_refs;	/* reference counter */
 
+	unsigned int		bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME	(1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME	(1 << 1)
+
 	unsigned int		sdio_irqs;
 	struct task_struct	*sdio_irq_thread;
 	atomic_t		sdio_irq_thread_abort;
@@ -281,6 +285,15 @@
 
 	struct dentry		*debugfs_root;
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	struct {
+		struct sdio_cis			*cis;
+		struct sdio_cccr		*cccr;
+		struct sdio_embedded_func	*funcs;
+		int				num_funcs;
+	} embedded_sdio_data;
+#endif
+
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
@@ -289,6 +302,14 @@
 extern void mmc_remove_host(struct mmc_host *);
 extern void mmc_free_host(struct mmc_host *);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				       struct sdio_cis *cis,
+				       struct sdio_cccr *cccr,
+				       struct sdio_embedded_func *funcs,
+				       int num_funcs);
+#endif
+
 static inline void *mmc_priv(struct mmc_host *host)
 {
 	return (void *)host->private;
@@ -299,6 +320,18 @@
 #define mmc_dev(x)	((x)->parent)
 #define mmc_classdev(x)	(&(x)->class_dev)
 #define mmc_hostname(x)	(dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+	if (manual)
+		host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+	else
+		host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
 
 extern int mmc_suspend_host(struct mmc_host *);
 extern int mmc_resume_host(struct mmc_host *);
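
As a sketch of how the deferred-resume flags above are meant to be used (for example by a WLAN SDIO function driver), under the assumption that the driver has a handle on the mmc_host:

#include <linux/mmc/host.h>

static void wifi_sdio_attach(struct mmc_host *host)
{
	/* card resume is now driven by the function driver, not by the
	 * core's PM callbacks */
	mmc_set_bus_resume_policy(host, 1);
}

static void wifi_sdio_wakeup(struct mmc_host *host)
{
	if (mmc_bus_needs_resume(host))
		mmc_resume_bus(host);
	/* safe to issue SDIO commands from here on */
}
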
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index d37aac4..3903823 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -26,5 +26,6 @@
 
 #define MMC_PM_KEEP_POWER	(1 << 0)	/* preserve card power during suspend */
 #define MMC_PM_WAKE_SDIO_IRQ	(1 << 1)	/* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY	(1 << 2)	/* ignore mmc pm notify */
 
 #endif
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
old mode 100644
new mode 100755
index 31baaf8..557acae
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -23,6 +23,14 @@
 typedef void (sdio_irq_handler_t)(struct sdio_func *);
 
 /*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+	uint8_t f_class;
+	uint32_t f_maxblksize;
+};
+
+/*
  * SDIO function CIS tuple (unknown to the core)
  */
 struct sdio_func_tuple {
@@ -130,6 +138,8 @@
 extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
 
 extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+	unsigned in);
 extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
 extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
 
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 34066e6..f38d4f0 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -101,6 +101,7 @@
 /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */
 #define FAT_IOCTL_GET_ATTRIBUTES	_IOR('r', 0x10, __u32)
 #define FAT_IOCTL_SET_ATTRIBUTES	_IOW('r', 0x11, __u32)
+#define VFAT_IOCTL_GET_VOLUME_ID	_IOR('r', 0x12, __u32)
 
 struct fat_boot_sector {
 	__u8	ignored[3];	/* Boot strap short or near jump */
@@ -138,6 +139,17 @@
 	__le32   reserved2[4];
 };
 
+struct fat_boot_bsx {
+	__u8     drive;		    /* drive number */
+	__u8     reserved1;
+	__u8     signature;	    /* extended boot signature */
+	__u8     vol_id[4];     /* volume ID */
+	__u8     vol_label[11]; /* volume label */
+	__u8     type[8];       /* file system type */
+};
+#define FAT16_BSX_OFFSET	36 /* offset of fat_boot_bsx in FAT12 and FAT16 */
+#define FAT32_BSX_OFFSET	64 /* offset of fat_boot_bsx in FAT32 */
+
 struct msdos_dir_entry {
 	__u8	name[MSDOS_NAME];/* name and extension */
 	__u8	attr;		/* attribute bits */
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644
index 0000000..ca60fbd
--- /dev/null
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID    XT_OWNER_UID
+#define XT_QTAGUID_GID    XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 0000000..eadc690
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+	XT_QUOTA_INVERT    = 1 << 0,
+	XT_QUOTA_GROW      = 1 << 1,
+	XT_QUOTA_PACKET    = 1 << 2,
+	XT_QUOTA_NO_CHANGE = 1 << 3,
+	XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+	char name[15];
+	u_int8_t flags;
+
+	/* Comparison-invariant */
+	aligned_u64 quota;
+
+	/* Used internally by the kernel */
+	struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h
index 26d7217..6359456 100644
--- a/include/linux/netfilter/xt_socket.h
+++ b/include/linux/netfilter/xt_socket.h
@@ -11,4 +11,10 @@
 	__u8 flags;
 };
 
+void xt_socket_put_sk(struct sock *sk);
+struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
+			       struct xt_action_param *par);
+struct sock *xt_socket_get6_sk(const struct sk_buff *skb,
+			       struct xt_action_param *par);
+
 #endif /* _XT_SOCKET_H */
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
index c9e4d81..2bb62bf 100644
--- a/include/linux/pda_power.h
+++ b/include/linux/pda_power.h
@@ -35,6 +35,8 @@
 	unsigned int polling_interval; /* msecs, default is 2000 */
 
 	unsigned long ac_max_uA; /* current to draw when on AC */
+
+	bool use_otg_notifier;
 };
 
 #endif /* __PDA_POWER_H__ */
diff --git a/include/linux/plist.h b/include/linux/plist.h
index c9b9f32..aa0fb39 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -77,14 +77,9 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/spinlock_types.h>
 
 struct plist_head {
 	struct list_head node_list;
-#ifdef CONFIG_DEBUG_PI_LIST
-	raw_spinlock_t *rawlock;
-	spinlock_t *spinlock;
-#endif
 };
 
 struct plist_node {
@@ -93,37 +88,13 @@
 	struct list_head	node_list;
 };
 
-#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock)		.spinlock = _lock
-# define PLIST_HEAD_LOCK_INIT_RAW(_lock)	.rawlock = _lock
-#else
-# define PLIST_HEAD_LOCK_INIT(_lock)
-# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
-#endif
-
-#define _PLIST_HEAD_INIT(head)				\
-	.node_list = LIST_HEAD_INIT((head).node_list)
-
 /**
  * PLIST_HEAD_INIT - static struct plist_head initializer
  * @head:	struct plist_head variable name
- * @_lock:	lock to initialize for this list
  */
-#define PLIST_HEAD_INIT(head, _lock)			\
+#define PLIST_HEAD_INIT(head)				\
 {							\
-	_PLIST_HEAD_INIT(head),				\
-	PLIST_HEAD_LOCK_INIT(&(_lock))			\
-}
-
-/**
- * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
- * @head:	struct plist_head variable name
- * @_lock:	lock to initialize for this list
- */
-#define PLIST_HEAD_INIT_RAW(head, _lock)		\
-{							\
-	_PLIST_HEAD_INIT(head),				\
-	PLIST_HEAD_LOCK_INIT_RAW(&(_lock))		\
+	.node_list = LIST_HEAD_INIT((head).node_list)	\
 }
 
 /**
@@ -141,31 +112,11 @@
 /**
  * plist_head_init - dynamic struct plist_head initializer
  * @head:	&struct plist_head pointer
- * @lock:	spinlock protecting the list (debugging)
  */
 static inline void
-plist_head_init(struct plist_head *head, spinlock_t *lock)
+plist_head_init(struct plist_head *head)
 {
 	INIT_LIST_HEAD(&head->node_list);
-#ifdef CONFIG_DEBUG_PI_LIST
-	head->spinlock = lock;
-	head->rawlock = NULL;
-#endif
-}
-
-/**
- * plist_head_init_raw - dynamic struct plist_head initializer
- * @head:	&struct plist_head pointer
- * @lock:	raw_spinlock protecting the list (debugging)
- */
-static inline void
-plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
-{
-	INIT_LIST_HEAD(&head->node_list);
-#ifdef CONFIG_DEBUG_PI_LIST
-	head->rawlock = lock;
-	head->spinlock = NULL;
-#endif
 }
 
 /**
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 204c18d..2287c321 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -14,6 +14,7 @@
 #define __LINUX_POWER_SUPPLY_H__
 
 #include <linux/device.h>
+#include <linux/wakelock.h>
 #include <linux/workqueue.h>
 #include <linux/leds.h>
 
@@ -163,6 +164,9 @@
 	/* private */
 	struct device *dev;
 	struct work_struct changed_work;
+	spinlock_t changed_lock;
+	bool changed;
+	struct wake_lock work_wake_lock;
 
 #ifdef CONFIG_LEDS_TRIGGERS
 	struct led_trigger *charging_full_trig;
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 8d522ff..de17134 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -66,7 +66,7 @@
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
 	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-	, .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
+	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
 
@@ -100,7 +100,7 @@
 
 #ifdef CONFIG_RT_MUTEXES
 # define INIT_RT_MUTEXES(tsk)						\
-	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock),	\
+	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters),	\
 	INIT_RT_MUTEX_DEBUG(tsk)
 #else
 # define INIT_RT_MUTEXES(tsk)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 496770a..f8d7369 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1745,6 +1745,9 @@
 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
 /*
  * Per process flags
  */
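
A sketch of a client of the new task-free notifier hooks (the Android low-memory killer is an in-tree user); the notifier data is assumed to be the task being freed.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>

static int my_task_free_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct task_struct *task = data;	/* task being freed (assumed) */

	pr_debug("task %d (%s) freed\n", task->pid, task->comm);
	return NOTIFY_OK;
}

static struct notifier_block my_task_free_nb = {
	.notifier_call = my_task_free_notify,
};

static int __init my_task_free_init(void)
{
	return task_free_register(&my_task_free_nb);
}
late_initcall(my_task_free_init);
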
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a5c3114..0d23989 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -245,6 +245,7 @@
 	void		(*pm)(struct uart_port *, unsigned int state,
 			      unsigned int oldstate);
 	int		(*set_wake)(struct uart_port *, unsigned int state);
+	void		(*wake_peer)(struct uart_port *);
 
 	/*
 	 * Return a string describing the type of the port
diff --git a/include/linux/sockios.h b/include/linux/sockios.h
index 7997a50..f7ffe36 100644
--- a/include/linux/sockios.h
+++ b/include/linux/sockios.h
@@ -65,6 +65,7 @@
 #define SIOCDIFADDR	0x8936		/* delete PA address		*/
 #define	SIOCSIFHWBROADCAST	0x8937	/* set hardware broadcast addr	*/
 #define SIOCGIFCOUNT	0x8938		/* get number of devices */
+#define SIOCKILLADDR	0x8939		/* kill sockets with this local addr */
 
 #define SIOCGIFBR	0x8940		/* Bridging support		*/
 #define SIOCSIFBR	0x8941		/* Set bridging options 	*/
diff --git a/include/linux/switch.h b/include/linux/switch.h
new file mode 100644
index 0000000..3e4c748
--- /dev/null
+++ b/include/linux/switch.h
@@ -0,0 +1,53 @@
+/*
+ *  Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+	const char	*name;
+	struct device	*dev;
+	int		index;
+	int		state;
+
+	ssize_t	(*print_name)(struct switch_dev *sdev, char *buf);
+	ssize_t	(*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+	const char *name;
+	unsigned 	gpio;
+
+	/* if NULL, switch_dev.name will be printed */
+	const char *name_on;
+	const char *name_off;
+	/* if NULL, "0" or "1" will be printed */
+	const char *state_on;
+	const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+	return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
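
A driver-side sketch of the switch class API: register a headset switch ("h2w" is the conventional Android name, exposed under /sys/class/switch/) and report state changes from the detection path. The detection logic itself is a placeholder.

#include <linux/switch.h>

static struct switch_dev headset_switch = {
	.name = "h2w",
};

static int headset_switch_setup(void)
{
	int ret = switch_dev_register(&headset_switch);

	if (ret < 0)
		return ret;
	/* call again from the detection interrupt/work as state changes */
	switch_set_state(&headset_switch, 1);	/* 1 = headset inserted */
	return 0;
}

static void headset_switch_teardown(void)
{
	switch_dev_unregister(&headset_switch);
}
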
diff --git a/include/linux/synaptics_i2c_rmi.h b/include/linux/synaptics_i2c_rmi.h
new file mode 100644
index 0000000..5539cc5
--- /dev/null
+++ b/include/linux/synaptics_i2c_rmi.h
@@ -0,0 +1,55 @@
+/*
+ * include/linux/synaptics_i2c_rmi.h - platform data structure for f75375s sensor
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNAPTICS_I2C_RMI_H
+#define _LINUX_SYNAPTICS_I2C_RMI_H
+
+#define SYNAPTICS_I2C_RMI_NAME "synaptics-rmi-ts"
+
+enum {
+	SYNAPTICS_FLIP_X = 1UL << 0,
+	SYNAPTICS_FLIP_Y = 1UL << 1,
+	SYNAPTICS_SWAP_XY = 1UL << 2,
+	SYNAPTICS_SNAP_TO_INACTIVE_EDGE = 1UL << 3,
+};
+
+struct synaptics_i2c_rmi_platform_data {
+	uint32_t version;	/* Use this entry for panels with */
+				/* (major << 8 | minor) version or above. */
+				/* If non-zero another array entry follows */
+	int (*power)(int on);	/* Only valid in first array entry */
+	uint32_t flags;
+	unsigned long irqflags;
+	uint32_t inactive_left; /* 0x10000 = screen width */
+	uint32_t inactive_right; /* 0x10000 = screen width */
+	uint32_t inactive_top; /* 0x10000 = screen height */
+	uint32_t inactive_bottom; /* 0x10000 = screen height */
+	uint32_t snap_left_on; /* 0x10000 = screen width */
+	uint32_t snap_left_off; /* 0x10000 = screen width */
+	uint32_t snap_right_on; /* 0x10000 = screen width */
+	uint32_t snap_right_off; /* 0x10000 = screen width */
+	uint32_t snap_top_on; /* 0x10000 = screen height */
+	uint32_t snap_top_off; /* 0x10000 = screen height */
+	uint32_t snap_bottom_on; /* 0x10000 = screen height */
+	uint32_t snap_bottom_off; /* 0x10000 = screen height */
+	uint32_t fuzz_x; /* 0x10000 = screen width */
+	uint32_t fuzz_y; /* 0x10000 = screen height */
+	int fuzz_p;
+	int fuzz_w;
+	int8_t sensitivity_adjust;
+};
+
+#endif /* _LINUX_SYNAPTICS_I2C_RMI_H */
diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h
new file mode 100644
index 0000000..6bd6c4e
--- /dev/null
+++ b/include/linux/uid_stat.h
@@ -0,0 +1,29 @@
+/* include/linux/uid_stat.h
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_UID_STAT_H
+#define _LINUX_UID_STAT_H
+
+/* Contains definitions for resource tracking per uid. */
+
+#ifdef CONFIG_UID_STAT
+int uid_stat_tcp_snd(uid_t uid, int size);
+int uid_stat_tcp_rcv(uid_t uid, int size);
+#else
+#define uid_stat_tcp_snd(uid, size) do {} while (0)
+#define uid_stat_tcp_rcv(uid, size) do {} while (0)
+#endif
+
+#endif /* _LINUX_UID_STAT_H */
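The pair of hooks above is meant to be called from the TCP transmit/receive paths; a caller would look roughly like the sketch below (illustrative only; sock_i_uid() comes from <net/sock.h> and is not introduced by this patch):

#include <net/sock.h>
#include <linux/uid_stat.h>

/* Account 'len' bytes sent on behalf of the socket's owner. When
 * CONFIG_UID_STAT is disabled the call compiles away to an empty statement. */
static inline void example_account_tx(struct sock *sk, int len)
{
	uid_stat_tcp_snd(sock_i_uid(sk), len);
}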
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index b78cba4..66a29a9 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -240,6 +240,9 @@
 		struct usb_configuration *,
 		int (*)(struct usb_configuration *));
 
+int usb_remove_config(struct usb_composite_dev *,
+		struct usb_configuration *);
+
 /**
  * struct usb_composite_driver - groups configurations into a gadget
  * @name: For diagnostics, identifies the driver.
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 0000000..ebcc5f3
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,81 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving device's protocol version (currently 1)
+ *
+ *	requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_GET_PROTOCOL
+ *	value:          0
+ *	index:          0
+ *	data            version number (16 bits little endian)
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_STRING
+ *	value:          0
+ *	index:          string ID
+ *	data            zero-terminated UTF-8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_START
+ *	value:          0
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_START         53
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
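On the device side, the ACCESSORY_GET_STRING_* ioctls retrieve whatever the host sent earlier with ACCESSORY_SEND_STRING. A user-space sketch follows; note that the "/dev/usb_accessory" node name is an assumption about the gadget function driver and is not defined by this header:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/f_accessory.h>

int main(void)
{
	char manufacturer[256];
	int fd = open("/dev/usb_accessory", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, ACCESSORY_GET_STRING_MANUFACTURER, manufacturer) < 0)
		perror("ACCESSORY_GET_STRING_MANUFACTURER");
	else
		printf("manufacturer: %s\n", manufacturer);
	close(fd);
	return 0;
}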
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 0000000..fdf828c
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,47 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+
+struct mtp_file_range {
+	/* file descriptor for file to transfer */
+	int			fd;
+	/* offset in file for start of transfer */
+	loff_t  	offset;
+	/* number of bytes to transfer */
+	int64_t		length;
+};
+
+struct mtp_event {
+	/* size of the event */
+	size_t		length;
+	/* event data to send */
+	void  		*data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
+
+#endif /* __LINUX_USB_F_MTP_H */
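MTP_SEND_FILE and MTP_RECEIVE_FILE take a descriptor for an ordinary file plus an offset/length window. A rough user-space sketch is below; the "/dev/mtp_usb" node name is an assumption about the function driver, not something defined by this header:

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <linux/usb/f_mtp.h>

/* Stream 'length' bytes of 'path' to the host over the MTP bulk-in endpoint. */
static int example_mtp_send(const char *path, int64_t length)
{
	struct mtp_file_range mfr;
	int ret, file_fd;
	int mtp_fd = open("/dev/mtp_usb", O_RDWR);	/* assumed device node */

	if (mtp_fd < 0)
		return -1;
	file_fd = open(path, O_RDONLY);
	if (file_fd < 0) {
		close(mtp_fd);
		return -1;
	}

	mfr.fd = file_fd;
	mfr.offset = 0;
	mfr.length = length;

	ret = ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
	close(file_fd);
	close(mtp_fd);
	return ret;
}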
diff --git a/include/linux/usb/otg_id.h b/include/linux/usb/otg_id.h
new file mode 100644
index 0000000..b686ab0
--- /dev/null
+++ b/include/linux/usb/otg_id.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_OTG_ID_H
+#define __LINUX_USB_OTG_ID_H
+
+#include <linux/notifier.h>
+#include <linux/plist.h>
+
+/**
+ * otg_id_notifier_block
+ *
+ * @priority: Order the notifications will be called in.  Higher numbers
+ *    get called first.
+ * @detect: Called during otg_id_notify.  Return OTG_ID_HANDLED if the USB cable
+ *    has been identified.
+ * @cancel: Called after detect has returned OTG_ID_HANDLED to ask it to
+ *    release detection resources to allow a new identification to occur.
+ */
+
+struct otg_id_notifier_block {
+	int priority;
+	int (*detect)(struct otg_id_notifier_block *otg_id_nb);
+	void (*cancel)(struct otg_id_notifier_block *otg_id_nb);
+	struct plist_node p;
+};
+
+#define OTG_ID_HANDLED 1
+#define OTG_ID_UNHANDLED 0
+
+int otg_id_register_notifier(struct otg_id_notifier_block *otg_id_nb);
+void otg_id_unregister_notifier(struct otg_id_notifier_block *otg_id_nb);
+
+void otg_id_notify(void);
+
+#endif /* __LINUX_USB_OTG_ID_H */
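A cable detector built on the notifier above might look roughly like this sketch; example_read_id_pin() is a hypothetical helper standing in for real ID-pin or PMIC sampling:

#include <linux/types.h>
#include <linux/usb/otg_id.h>

static bool example_read_id_pin(void)
{
	/* Hypothetical: sample a GPIO or PMIC status bit here. */
	return false;
}

static int example_detect(struct otg_id_notifier_block *nb)
{
	if (example_read_id_pin())
		return OTG_ID_HANDLED;	/* this detector owns the cable until cancel() */
	return OTG_ID_UNHANDLED;
}

static void example_cancel(struct otg_id_notifier_block *nb)
{
	/* Release whatever detect() claimed so detection can run again. */
}

static struct otg_id_notifier_block example_otg_nb = {
	.priority = 10,			/* higher-priority detectors run first */
	.detect   = example_detect,
	.cancel   = example_cancel,
};

/* Typically called from the detecting driver's probe() routine. */
static int example_register(void)
{
	return otg_id_register_notifier(&example_otg_nb);
}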
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100755
index 0000000..a096d24
--- /dev/null
+++ b/include/linux/wakelock.h
@@ -0,0 +1,91 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/list.h>
+#include <linux/ktime.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend. If the type is WAKE_LOCK_IDLE, low power
+ * states that cause large interrupt latencies or that disable a set of
+ * interrupts will not be entered from idle until the wake_locks are released.
+ */
+
+enum {
+	WAKE_LOCK_SUSPEND, /* Prevent suspend */
+	WAKE_LOCK_IDLE,    /* Prevent low power idle */
+	WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+#ifdef CONFIG_HAS_WAKELOCK
+	struct list_head    link;
+	int                 flags;
+	const char         *name;
+	unsigned long       expires;
+#ifdef CONFIG_WAKELOCK_STAT
+	struct {
+		int             count;
+		int             expire_count;
+		int             wakeup_count;
+		ktime_t         total_time;
+		ktime_t         prevent_suspend_time;
+		ktime_t         max_time;
+		ktime_t         last_time;
+	} stat;
+#endif
+#endif
+};
+
+#ifdef CONFIG_HAS_WAKELOCK
+
+void wake_lock_init(struct wake_lock *lock, int type, const char *name);
+void wake_lock_destroy(struct wake_lock *lock);
+void wake_lock(struct wake_lock *lock);
+void wake_lock_timeout(struct wake_lock *lock, long timeout);
+void wake_unlock(struct wake_lock *lock);
+
+/* wake_lock_active returns a non-zero value if the wake_lock is currently
+ * locked. If the wake_lock has a timeout, it does not check for expiry
+ * itself, but it returns 0 once the expired timeout has been processed.
+ */
+int wake_lock_active(struct wake_lock *lock);
+
+/* has_wake_lock returns 0 if no wake locks of the specified type are active,
+ * and non-zero if one or more wake locks are held. Specifically, it returns
+ * -1 if one or more wake locks with no timeout are active, or otherwise the
+ * number of jiffies until all active wake locks time out.
+ */
+long has_wake_lock(int type);
+
+#else
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+					const char *name) {}
+static inline void wake_lock_destroy(struct wake_lock *lock) {}
+static inline void wake_lock(struct wake_lock *lock) {}
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout) {}
+static inline void wake_unlock(struct wake_lock *lock) {}
+
+static inline int wake_lock_active(struct wake_lock *lock) { return 0; }
+static inline long has_wake_lock(int type) { return 0; }
+
+#endif
+
+#endif
+
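As a minimal usage sketch (not part of this patch), a driver holds a WAKE_LOCK_SUSPEND lock across work that must finish before the system is allowed to suspend, or uses the timeout variant for work bounded in time:

#include <linux/jiffies.h>
#include <linux/wakelock.h>

static struct wake_lock example_lock;

static void example_setup(void)
{
	wake_lock_init(&example_lock, WAKE_LOCK_SUSPEND, "example");
}

static void example_handle_event(void)
{
	wake_lock(&example_lock);
	/* ... process the wakeup event; suspend is blocked meanwhile ... */
	wake_unlock(&example_lock);
}

static void example_handle_irq(void)
{
	/* Or hold the lock for at most two seconds and let it expire. */
	wake_lock_timeout(&example_lock, 2 * HZ);
}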
diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h
new file mode 100644
index 0000000..f07e067
--- /dev/null
+++ b/include/linux/wifi_tiwlan.h
@@ -0,0 +1,27 @@
+/* include/linux/wifi_tiwlan.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WIFI_TIWLAN_H_
+#define _LINUX_WIFI_TIWLAN_H_
+
+#include <linux/wlan_plat.h>
+
+#define WMPA_NUMBER_OF_SECTIONS	3
+#define WMPA_NUMBER_OF_BUFFERS	160
+#define WMPA_SECTION_HEADER	24
+#define WMPA_SECTION_SIZE_0	(WMPA_NUMBER_OF_BUFFERS * 64)
+#define WMPA_SECTION_SIZE_1	(WMPA_NUMBER_OF_BUFFERS * 256)
+#define WMPA_SECTION_SIZE_2	(WMPA_NUMBER_OF_BUFFERS * 2048)
+
+#endif
diff --git a/include/linux/wl127x-rfkill.h b/include/linux/wl127x-rfkill.h
new file mode 100644
index 0000000..9057ec6
--- /dev/null
+++ b/include/linux/wl127x-rfkill.h
@@ -0,0 +1,35 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _LINUX_WL127X_RFKILL_H
+#define _LINUX_WL127X_RFKILL_H
+
+#include <linux/rfkill.h>
+
+struct wl127x_rfkill_platform_data {
+	int nshutdown_gpio;
+
+	struct rfkill *rfkill;  /* for driver only */
+};
+
+#endif
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644
index 0000000..40ec348
--- /dev/null
+++ b/include/linux/wlan_plat.h
@@ -0,0 +1,27 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+struct wifi_platform_data {
+	int (*set_power)(int val);
+	int (*set_reset)(int val);
+	int (*set_carddetect)(int val);
+	void *(*mem_prealloc)(int section, unsigned long size);
+	int (*get_mac_addr)(unsigned char *buf);
+	void *(*get_country_code)(char *ccode);
+};
+
+#endif
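wifi_platform_data is handed to a WLAN driver through its platform device. A board-file sketch follows; the "example_wifi" device name and the empty power callback are assumptions, the real names depend on which WLAN driver consumes the data:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/wlan_plat.h>

static int example_wifi_set_power(int on)
{
	/* Drive the WLAN enable GPIO or regulator here. */
	return 0;
}

static struct wifi_platform_data example_wifi_pdata = {
	.set_power = example_wifi_set_power,
};

static struct platform_device example_wifi_device = {
	.name	= "example_wifi",	/* the name the WLAN driver binds against */
	.id	= -1,
	.dev	= {
		.platform_data = &example_wifi_pdata,
	},
};

static int __init example_wifi_init(void)
{
	return platform_device_register(&example_wifi_device);
}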
diff --git a/include/net/activity_stats.h b/include/net/activity_stats.h
new file mode 100644
index 0000000..10e4c15
--- /dev/null
+++ b/include/net/activity_stats.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#ifndef _NET_ACTIVITY_STATS_H
+#define _NET_ACTIVITY_STATS_H
+
+#ifdef CONFIG_NET_ACTIVITY_STATS
+void activity_stats_update(void);
+#else
+#define activity_stats_update() do { } while (0)
+#endif
+
+#endif /* _NET_ACTIVITY_STATS_H */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 4375043..e727555 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -56,6 +56,7 @@
 #define BT_SECURITY	4
 struct bt_security {
 	__u8 level;
+	__u8 key_size;
 };
 #define BT_SECURITY_SDP		0
 #define BT_SECURITY_LOW		1
@@ -69,9 +70,19 @@
 #define BT_FLUSHABLE_OFF	0
 #define BT_FLUSHABLE_ON		1
 
-#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg)
-#define BT_ERR(fmt, arg...)  printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
-#define BT_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n" , __func__ , ## arg)
+#define BT_POWER	9
+struct bt_power {
+	__u8 force_active;
+};
+#define BT_POWER_FORCE_ACTIVE_OFF 0
+#define BT_POWER_FORCE_ACTIVE_ON  1
+
+__attribute__((format (printf, 2, 3)))
+int bt_printk(const char *level, const char *fmt, ...);
+
+#define BT_INFO(fmt, arg...)   bt_printk(KERN_INFO, pr_fmt(fmt), ##arg)
+#define BT_ERR(fmt, arg...)    bt_printk(KERN_ERR, pr_fmt(fmt), ##arg)
+#define BT_DBG(fmt, arg...)    pr_debug(fmt "\n", ##arg)
 
 /* Connection and socket states */
 enum {
@@ -130,7 +141,8 @@
 int  bt_sock_unregister(int proto);
 void bt_sock_link(struct bt_sock_list *l, struct sock *s);
 void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
-int  bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags);
+int  bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+				struct msghdr *msg, size_t len, int flags);
 int  bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			struct msghdr *msg, size_t len, int flags);
 uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
@@ -150,6 +162,7 @@
 	__u8 retries;
 	__u8 sar;
 	unsigned short channel;
+	__u8 force_active;
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
 
@@ -164,8 +177,8 @@
 	return skb;
 }
 
-static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long len, 
-							int nb, int *err)
+static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
+					unsigned long len, int nb, int *err)
 {
 	struct sk_buff *skb;
 
@@ -195,7 +208,7 @@
 	return NULL;
 }
 
-int bt_err(__u16 code);
+int bt_to_errno(__u16 code);
 
 extern int hci_sock_init(void);
 extern void hci_sock_cleanup(void);
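The new BT_POWER socket option lets a user-space profile opt out of forcing the ACL link back to active mode for its traffic. A sketch of setting it on a Bluetooth socket follows; SOL_BLUETOOTH and the BlueZ user-space header are assumptions, only BT_POWER and struct bt_power come from the change above:

#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* BlueZ userspace header: SOL_BLUETOOTH */

#ifndef BT_POWER
/* Mirror the definitions added to include/net/bluetooth/bluetooth.h above,
 * for toolchains whose Bluetooth headers predate this option. */
#define BT_POWER 9
struct bt_power {
	uint8_t force_active;
};
#define BT_POWER_FORCE_ACTIVE_OFF 0
#define BT_POWER_FORCE_ACTIVE_ON  1
#endif

/* Keep the link in sniff mode for this socket's traffic to save power. */
static int example_relax_power(int sk)
{
	struct bt_power pwr = { .force_active = BT_POWER_FORCE_ACTIVE_OFF };

	return setsockopt(sk, SOL_BLUETOOTH, BT_POWER, &pwr, sizeof(pwr));
}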
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 0c20227..0489b8b 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -157,8 +157,10 @@
 #define ESCO_2EV5	0x0100
 #define ESCO_3EV5	0x0200
 
-#define SCO_ESCO_MASK  (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
-#define EDR_ESCO_MASK  (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define SCO_ESCO_MASK	(ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
+#define EDR_ESCO_MASK	(ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define ALL_ESCO_MASK	(SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \
+			EDR_ESCO_MASK)
 
 /* ACL flags */
 #define ACL_START_NO_FLUSH	0x00
@@ -211,11 +213,16 @@
 #define LMP_EDR_3S_ESCO	0x80
 
 #define LMP_EXT_INQ	0x01
+#define LMP_SIMUL_LE_BR	0x02
 #define LMP_SIMPLE_PAIR	0x08
 #define LMP_NO_FLUSH	0x40
 
 #define LMP_LSTO	0x01
 #define LMP_INQ_TX_PWR	0x02
+#define LMP_EXTFEATURES	0x80
+
+/* Extended LMP features */
+#define LMP_HOST_LE	0x02
 
 /* Connection modes */
 #define HCI_CM_ACTIVE	0x0000
@@ -254,6 +261,10 @@
 #define HCI_LK_UNAUTH_COMBINATION	0x04
 #define HCI_LK_AUTH_COMBINATION		0x05
 #define HCI_LK_CHANGED_COMBINATION	0x06
+/* The spec doesn't define types for SMP keys */
+#define HCI_LK_SMP_LTK			0x81
+#define HCI_LK_SMP_IRK			0x82
+#define HCI_LK_SMP_CSRK			0x83
 
 /* -----  HCI Commands ---- */
 #define HCI_OP_NOP			0x0000
@@ -653,6 +664,12 @@
 
 #define HCI_OP_READ_INQ_RSP_TX_POWER	0x0c58
 
+#define HCI_OP_WRITE_LE_HOST_SUPPORTED	0x0c6d
+struct hci_cp_write_le_host_supported {
+	__u8 le;
+	__u8 simul;
+} __packed;
+
 #define HCI_OP_READ_LOCAL_VERSION	0x1001
 struct hci_rp_read_local_version {
 	__u8     status;
@@ -676,6 +693,9 @@
 } __packed;
 
 #define HCI_OP_READ_LOCAL_EXT_FEATURES	0x1004
+struct hci_cp_read_local_ext_features {
+	__u8     page;
+} __packed;
 struct hci_rp_read_local_ext_features {
 	__u8     status;
 	__u8     page;
@@ -710,6 +730,12 @@
 	__u8     le_max_pkt;
 } __packed;
 
+#define HCI_OP_LE_SET_SCAN_ENABLE	0x200c
+struct hci_cp_le_set_scan_enable {
+	__u8     enable;
+	__u8     filter_dup;
+} __packed;
+
 #define HCI_OP_LE_CREATE_CONN		0x200d
 struct hci_cp_le_create_conn {
 	__le16   scan_interval;
@@ -739,6 +765,33 @@
 	__le16   max_ce_len;
 } __packed;
 
+#define HCI_OP_LE_START_ENC		0x2019
+struct hci_cp_le_start_enc {
+	__le16	handle;
+	__u8	rand[8];
+	__le16	ediv;
+	__u8	ltk[16];
+} __packed;
+
+#define HCI_OP_LE_LTK_REPLY		0x201a
+struct hci_cp_le_ltk_reply {
+	__le16	handle;
+	__u8	ltk[16];
+} __packed;
+struct hci_rp_le_ltk_reply {
+	__u8	status;
+	__le16	handle;
+} __packed;
+
+#define HCI_OP_LE_LTK_NEG_REPLY		0x201b
+struct hci_cp_le_ltk_neg_reply {
+	__le16	handle;
+} __packed;
+struct hci_rp_le_ltk_neg_reply {
+	__u8	status;
+	__le16	handle;
+} __packed;
+
 /* ---- HCI Events ---- */
 #define HCI_EV_INQUIRY_COMPLETE		0x01
 
@@ -1029,6 +1082,32 @@
 	__u8     clk_accurancy;
 } __packed;
 
+#define HCI_EV_LE_LTK_REQ		0x05
+struct hci_ev_le_ltk_req {
+	__le16	handle;
+	__u8	random[8];
+	__le16	ediv;
+} __packed;
+
+/* Advertising report event types */
+#define ADV_IND		0x00
+#define ADV_DIRECT_IND	0x01
+#define ADV_SCAN_IND	0x02
+#define ADV_NONCONN_IND	0x03
+#define ADV_SCAN_RSP	0x04
+
+#define ADDR_LE_DEV_PUBLIC	0x00
+#define ADDR_LE_DEV_RANDOM	0x01
+
+#define HCI_EV_LE_ADVERTISING_REPORT	0x02
+struct hci_ev_le_advertising_info {
+	__u8	 evt_type;
+	__u8	 bdaddr_type;
+	bdaddr_t bdaddr;
+	__u8	 length;
+	__u8	 data[0];
+} __packed;
+
 /* Internal events generated by Bluetooth stack */
 #define HCI_EV_STACK_INTERNAL	0xfd
 struct hci_ev_stack_internal {
@@ -1184,6 +1263,9 @@
 	__u8     out;
 	__u16    state;
 	__u32    link_mode;
+	__u32    mtu;
+	__u32    cnt;
+	__u32    pkts;
 };
 
 struct hci_dev_req {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6c994c0..27e1b36 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -74,12 +74,28 @@
 	u8 svc_hint;
 };
 
+struct key_master_id {
+	__le16 ediv;
+	u8 rand[8];
+} __packed;
+
+struct link_key_data {
+	bdaddr_t bdaddr;
+	u8 type;
+	u8 val[16];
+	u8 pin_len;
+	u8 dlen;
+	u8 data[0];
+} __packed;
+
 struct link_key {
 	struct list_head list;
 	bdaddr_t bdaddr;
 	u8 type;
 	u8 val[16];
 	u8 pin_len;
+	u8 dlen;
+	u8 data[0];
 };
 
 struct oob_data {
@@ -89,6 +105,12 @@
 	u8 randomizer[16];
 };
 
+struct adv_entry {
+	struct list_head list;
+	bdaddr_t bdaddr;
+	u8 bdaddr_type;
+};
+
 #define NUM_REASSEMBLY 4
 struct hci_dev {
 	struct list_head list;
@@ -107,6 +129,7 @@
 	__u8		major_class;
 	__u8		minor_class;
 	__u8		features[8];
+	__u8		extfeatures[8];
 	__u8		commands[64];
 	__u8		ssp_mode;
 	__u8		hci_ver;
@@ -171,6 +194,8 @@
 
 	__u16			init_last_cmd;
 
+	struct crypto_blkcipher	*tfm;
+
 	struct inquiry_cache	inq_cache;
 	struct hci_conn_hash	conn_hash;
 	struct list_head	blacklist;
@@ -181,6 +206,9 @@
 
 	struct list_head	remote_oob_data;
 
+	struct list_head	adv_entries;
+	struct timer_list	adv_timer;
+
 	struct hci_dev_stats	stat;
 
 	struct sk_buff_head	driver_init;
@@ -212,9 +240,9 @@
 	struct list_head list;
 
 	atomic_t	refcnt;
-	spinlock_t	lock;
 
 	bdaddr_t	dst;
+	__u8		dst_type;
 	__u16		handle;
 	__u16		state;
 	__u8		mode;
@@ -233,6 +261,7 @@
 	__u8		sec_level;
 	__u8		pending_sec_level;
 	__u8		pin_length;
+	__u8		enc_key_size;
 	__u8		io_capability;
 	__u8		power_save;
 	__u16		disc_timeout;
@@ -259,7 +288,6 @@
 	struct hci_dev	*hdev;
 	void		*l2cap_data;
 	void		*sco_data;
-	void		*priv;
 
 	struct hci_conn	*link;
 
@@ -307,12 +335,14 @@
 	return jiffies - e->timestamp;
 }
 
-struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+							bdaddr_t *bdaddr);
 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
 
 /* ----- HCI Connections ----- */
 enum {
 	HCI_CONN_AUTH_PEND,
+	HCI_CONN_REAUTH_PEND,
 	HCI_CONN_ENCRYPT_PEND,
 	HCI_CONN_RSWITCH_PEND,
 	HCI_CONN_MODE_CHANGE_PEND,
@@ -415,19 +445,22 @@
 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst);
 int hci_conn_del(struct hci_conn *conn);
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
 
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+						__u16 pkt_type, bdaddr_t *dst,
+						__u8 sec_level, __u8 auth_type);
 int hci_conn_check_link_mode(struct hci_conn *conn);
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
 int hci_conn_change_link_key(struct hci_conn *conn);
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
-void hci_conn_enter_active_mode(struct hci_conn *conn);
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
 
 void hci_conn_hold_device(struct hci_conn *conn);
@@ -448,11 +481,13 @@
 			if (conn->state == BT_CONNECTED) {
 				timeo = msecs_to_jiffies(conn->disc_timeout);
 				if (!conn->out)
-					timeo *= 2;
-			} else
+					timeo *= 20;
+			} else {
 				timeo = msecs_to_jiffies(10);
-		} else
+			}
+		} else {
 			timeo = msecs_to_jiffies(10);
+		}
 		mod_timer(&conn->disc_timer, jiffies + timeo);
 	}
 }
@@ -511,6 +546,8 @@
 
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int hci_blacklist_clear(struct hci_dev *hdev);
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
 int hci_uuids_clear(struct hci_dev *hdev);
 
@@ -518,6 +555,11 @@
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 			bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
+struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
+struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
+					bdaddr_t *bdaddr, u8 type);
+int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16]);
 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
 int hci_remote_oob_data_clear(struct hci_dev *hdev);
@@ -527,6 +569,12 @@
 								u8 *randomizer);
 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
+#define ADV_CLEAR_TIMEOUT (3*60*HZ) /* Three minutes */
+int hci_adv_entries_clear(struct hci_dev *hdev);
+struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int hci_add_adv_entry(struct hci_dev *hdev,
+					struct hci_ev_le_advertising_info *ev);
+
 void hci_del_off_timer(struct hci_dev *hdev);
 
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -553,6 +601,9 @@
 #define lmp_no_flush_capable(dev)  ((dev)->features[6] & LMP_NO_FLUSH)
 #define lmp_le_capable(dev)        ((dev)->features[4] & LMP_LE)
 
+/* ----- Extended LMP capabilities ----- */
+#define lmp_host_le_capable(dev)   ((dev)->extfeatures[0] & LMP_HOST_LE)
+
 /* ----- HCI protocols ----- */
 struct hci_proto {
 	char		*name;
@@ -561,16 +612,20 @@
 
 	void		*priv;
 
-	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
+	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								__u8 type);
 	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
 	int (*disconn_ind)	(struct hci_conn *conn);
 	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
-	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb,
+								__u16 flags);
 	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
-	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
+	int (*security_cfm)	(struct hci_conn *conn, __u8 status,
+								__u8 encrypt);
 };
 
-static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+								__u8 type)
 {
 	register struct hci_proto *hp;
 	int mask = 0;
@@ -656,7 +711,8 @@
 		conn->security_cfm_cb(conn, status);
 }
 
-static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
+								__u8 encrypt)
 {
 	register struct hci_proto *hp;
 
@@ -681,7 +737,8 @@
 
 	char *name;
 
-	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
+	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
+								__u8 encrypt);
 	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
 	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
 };
@@ -707,13 +764,17 @@
 	read_unlock_bh(&hci_cb_list_lock);
 }
 
-static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
+								__u8 encrypt)
 {
 	struct list_head *p;
 
 	if (conn->sec_level == BT_SECURITY_SDP)
 		conn->sec_level = BT_SECURITY_LOW;
 
+	if (conn->pending_sec_level > conn->sec_level)
+		conn->sec_level = conn->pending_sec_level;
+
 	hci_proto_encrypt_cfm(conn, status, encrypt);
 
 	read_lock_bh(&hci_cb_list_lock);
@@ -738,7 +799,8 @@
 	read_unlock_bh(&hci_cb_list_lock);
 }
 
-static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
+static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+								__u8 role)
 {
 	struct list_head *p;
 
@@ -830,4 +892,9 @@
 
 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
 					u16 latency, u16 to_multiplier);
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
+							__u8 ltk[16]);
+void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
+void hci_le_ltk_neg_reply(struct hci_conn *conn);
+
 #endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index d09c9b1..578545a 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -37,7 +37,6 @@
 #define L2CAP_DEFAULT_MONITOR_TO	12000   /* 12 seconds */
 #define L2CAP_DEFAULT_MAX_PDU_SIZE	1009    /* Sized for 3-DH5 packet */
 #define L2CAP_DEFAULT_ACK_TO		200
-#define L2CAP_LOCAL_BUSY_TRIES		12
 #define L2CAP_LE_DEFAULT_MTU		23
 
 #define L2CAP_CONN_TIMEOUT	(40000) /* 40 seconds */
@@ -287,6 +286,10 @@
 
 	struct l2cap_conn	*conn;
 
+	__u8		state;
+
+	atomic_t	refcnt;
+
 	__le16		psm;
 	__u16		dcid;
 	__u16		scid;
@@ -295,6 +298,7 @@
 	__u16		omtu;
 	__u16		flush_to;
 	__u8		mode;
+	__u8		chan_type;
 
 	__le16		sport;
 
@@ -302,6 +306,7 @@
 	__u8		role_switch;
 	__u8		force_reliable;
 	__u8		flushable;
+	__u8		force_active;
 
 	__u8		ident;
 
@@ -318,8 +323,8 @@
 	__u16		monitor_timeout;
 	__u16		mps;
 
-	__u8		conf_state;
-	__u16		conn_state;
+	unsigned long	conf_state;
+	unsigned long	conn_state;
 
 	__u8		next_tx_seq;
 	__u8		expected_ack_seq;
@@ -339,18 +344,29 @@
 	__u8		remote_max_tx;
 	__u16		remote_mps;
 
+	struct timer_list	chan_timer;
 	struct timer_list	retrans_timer;
 	struct timer_list	monitor_timer;
 	struct timer_list	ack_timer;
 	struct sk_buff		*tx_send_head;
 	struct sk_buff_head	tx_q;
 	struct sk_buff_head	srej_q;
-	struct sk_buff_head	busy_q;
-	struct work_struct	busy_work;
 	struct list_head	srej_l;
 
 	struct list_head list;
 	struct list_head global_l;
+
+	void		*data;
+	struct l2cap_ops *ops;
+};
+
+struct l2cap_ops {
+	char		*name;
+
+	struct l2cap_chan	*(*new_connection) (void *data);
+	int			(*recv) (void *data, struct sk_buff *skb);
+	void			(*close) (void *data);
+	void			(*state_change) (void *data, int state);
 };
 
 struct l2cap_conn {
@@ -376,6 +392,15 @@
 
 	__u8		disc_reason;
 
+	__u8		preq[7]; /* SMP Pairing Request */
+	__u8		prsp[7]; /* SMP Pairing Response */
+	__u8		prnd[16]; /* SMP Pairing Random */
+	__u8		pcnf[16]; /* SMP Pairing Confirm */
+	__u8		tk[16]; /* SMP Temporary Key */
+	__u8		smp_key_size;
+
+	struct timer_list security_timer;
+
 	struct list_head chan_l;
 	rwlock_t	chan_lock;
 };
@@ -384,44 +409,57 @@
 #define L2CAP_INFO_FEAT_MASK_REQ_SENT	0x04
 #define L2CAP_INFO_FEAT_MASK_REQ_DONE	0x08
 
+#define L2CAP_CHAN_RAW			1
+#define L2CAP_CHAN_CONN_LESS		2
+#define L2CAP_CHAN_CONN_ORIENTED	3
+
 /* ----- L2CAP socket info ----- */
 #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
 
 struct l2cap_pinfo {
 	struct bt_sock	bt;
 	struct l2cap_chan	*chan;
+	struct sk_buff	*rx_busy_skb;
 };
 
-#define L2CAP_CONF_REQ_SENT       0x01
-#define L2CAP_CONF_INPUT_DONE     0x02
-#define L2CAP_CONF_OUTPUT_DONE    0x04
-#define L2CAP_CONF_MTU_DONE       0x08
-#define L2CAP_CONF_MODE_DONE      0x10
-#define L2CAP_CONF_CONNECT_PEND   0x20
-#define L2CAP_CONF_NO_FCS_RECV    0x40
-#define L2CAP_CONF_STATE2_DEVICE  0x80
+enum {
+	CONF_REQ_SENT,
+	CONF_INPUT_DONE,
+	CONF_OUTPUT_DONE,
+	CONF_MTU_DONE,
+	CONF_MODE_DONE,
+	CONF_CONNECT_PEND,
+	CONF_NO_FCS_RECV,
+	CONF_STATE2_DEVICE,
+};
 
 #define L2CAP_CONF_MAX_CONF_REQ 2
 #define L2CAP_CONF_MAX_CONF_RSP 2
 
-#define L2CAP_CONN_SAR_SDU         0x0001
-#define L2CAP_CONN_SREJ_SENT       0x0002
-#define L2CAP_CONN_WAIT_F          0x0004
-#define L2CAP_CONN_SREJ_ACT        0x0008
-#define L2CAP_CONN_SEND_PBIT       0x0010
-#define L2CAP_CONN_REMOTE_BUSY     0x0020
-#define L2CAP_CONN_LOCAL_BUSY      0x0040
-#define L2CAP_CONN_REJ_ACT         0x0080
-#define L2CAP_CONN_SEND_FBIT       0x0100
-#define L2CAP_CONN_RNR_SENT        0x0200
-#define L2CAP_CONN_SAR_RETRY       0x0400
+enum {
+	CONN_SAR_SDU,
+	CONN_SREJ_SENT,
+	CONN_WAIT_F,
+	CONN_SREJ_ACT,
+	CONN_SEND_PBIT,
+	CONN_REMOTE_BUSY,
+	CONN_LOCAL_BUSY,
+	CONN_REJ_ACT,
+	CONN_SEND_FBIT,
+	CONN_RNR_SENT,
+};
 
-#define __mod_retrans_timer() mod_timer(&chan->retrans_timer, \
-		jiffies +  msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
-#define __mod_monitor_timer() mod_timer(&chan->monitor_timer, \
-		jiffies + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO));
-#define __mod_ack_timer() mod_timer(&chan->ack_timer, \
-		jiffies + msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
+#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
+#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
+#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
+		L2CAP_DEFAULT_RETRANS_TO)
+#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
+#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \
+		L2CAP_DEFAULT_MONITOR_TO)
+#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
+#define __set_ack_timer(c) l2cap_set_timer(c, &c->ack_timer, \
+		L2CAP_DEFAULT_ACK_TO)
+#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
 
 static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
 {
@@ -446,32 +484,17 @@
 int l2cap_init_sockets(void);
 void l2cap_cleanup_sockets(void);
 
-void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
 int __l2cap_wait_ack(struct sock *sk);
 
-struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
-struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
-struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen);
-int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
-void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb);
-void l2cap_streaming_send(struct l2cap_chan *chan);
-int l2cap_ertm_send(struct l2cap_chan *chan);
-
 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid);
 
-void l2cap_sock_set_timer(struct sock *sk, long timeout);
-void l2cap_sock_clear_timer(struct sock *sk);
-void __l2cap_sock_close(struct sock *sk, int reason);
-void l2cap_sock_kill(struct sock *sk);
-void l2cap_sock_init(struct sock *sk, struct sock *parent);
-struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
-							int proto, gfp_t prio);
-void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err);
 struct l2cap_chan *l2cap_chan_create(struct sock *sk);
-void l2cap_chan_del(struct l2cap_chan *chan, int err);
+void l2cap_chan_close(struct l2cap_chan *chan, int reason);
 void l2cap_chan_destroy(struct l2cap_chan *chan);
 int l2cap_chan_connect(struct l2cap_chan *chan);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
 
 #endif /* __L2CAP_H */
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 4899286..5428fd3 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -101,6 +101,8 @@
 	u8 type;
 	u8 val[16];
 	u8 pin_len;
+	u8 dlen;
+	u8 data[0];
 } __packed;
 
 #define MGMT_OP_LOAD_KEYS		0x000D
@@ -199,6 +201,16 @@
 
 #define MGMT_OP_STOP_DISCOVERY		0x001C
 
+#define MGMT_OP_BLOCK_DEVICE		0x001D
+struct mgmt_cp_block_device {
+	bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_UNBLOCK_DEVICE		0x001E
+struct mgmt_cp_unblock_device {
+	bdaddr_t bdaddr;
+} __packed;
+
 #define MGMT_EV_CMD_COMPLETE		0x0001
 struct mgmt_ev_cmd_complete {
 	__le16 opcode;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 6eac4a7..d5eee20 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -234,7 +234,8 @@
 /* ---- RFCOMM DLCs (channels) ---- */
 struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio);
 void rfcomm_dlc_free(struct rfcomm_dlc *d);
-int  rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel);
+int  rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
+								u8 channel);
 int  rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
 int  rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
 int  rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
@@ -271,7 +272,8 @@
 }
 
 /* ---- RFCOMM sessions ---- */
-void   rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst);
+void   rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src,
+								bdaddr_t *dst);
 
 static inline void rfcomm_session_hold(struct rfcomm_session *s)
 {
@@ -312,7 +314,8 @@
 int  rfcomm_init_sockets(void);
 void rfcomm_cleanup_sockets(void);
 
-int  rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d);
+int  rfcomm_connect_ind(struct rfcomm_session *s, u8 channel,
+							struct rfcomm_dlc **d);
 
 /* ---- RFCOMM TTY ---- */
 #define RFCOMM_MAX_DEV  256
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1e35c43..6d1857a 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -37,6 +37,7 @@
 struct sockaddr_sco {
 	sa_family_t	sco_family;
 	bdaddr_t	sco_bdaddr;
+	__u16		sco_pkt_type;
 };
 
 /* SCO socket options */
@@ -72,7 +73,8 @@
 
 struct sco_pinfo {
 	struct bt_sock	bt;
-	__u32		flags;
+	__u16		pkt_type;
+
 	struct sco_conn	*conn;
 };
 
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index 8f2edbf..46c4576 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -1,3 +1,25 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
 #ifndef __SMP_H
 #define __SMP_H
 
@@ -16,6 +38,23 @@
 	__u8	resp_key_dist;
 } __packed;
 
+#define SMP_IO_DISPLAY_ONLY	0x00
+#define SMP_IO_DISPLAY_YESNO	0x01
+#define SMP_IO_KEYBOARD_ONLY	0x02
+#define SMP_IO_NO_INPUT_OUTPUT	0x03
+#define SMP_IO_KEYBOARD_DISPLAY	0x04
+
+#define SMP_OOB_NOT_PRESENT	0x00
+#define SMP_OOB_PRESENT		0x01
+
+#define SMP_DIST_ENC_KEY	0x01
+#define SMP_DIST_ID_KEY		0x02
+#define SMP_DIST_SIGN		0x04
+
+#define SMP_AUTH_NONE		0x00
+#define SMP_AUTH_BONDING	0x01
+#define SMP_AUTH_MITM		0x04
+
 #define SMP_CMD_PAIRING_CONFIRM	0x03
 struct smp_cmd_pairing_confirm {
 	__u8	confirm_val[16];
@@ -73,4 +112,12 @@
 #define SMP_UNSPECIFIED		0x08
 #define SMP_REPEATED_ATTEMPTS		0x09
 
+#define SMP_MIN_ENC_KEY_SIZE		7
+#define SMP_MAX_ENC_KEY_SIZE		16
+
+/* SMP Commands */
+int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
+int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
+int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+
 #endif /* __SMP_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cda30ea..7377393 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1404,6 +1404,8 @@
 extern int tcp_gro_complete(struct sk_buff *skb);
 extern int tcp4_gro_complete(struct sk_buff *skb);
 
+extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
+
 #ifdef CONFIG_PROC_FS
 extern int tcp4_proc_init(void);
 extern void tcp4_proc_exit(void);
diff --git a/init/Kconfig b/init/Kconfig
index 412c21b..4fe9168 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -915,6 +915,12 @@
 config ANON_INODES
 	bool
 
+config PANIC_TIMEOUT
+	int "Default panic timeout"
+	default 0
+	help
+	  Set the default panic timeout: the number of seconds the kernel
+	  waits after a panic before rebooting (0 means wait forever).
+
 menuconfig EXPERT
 	bool "Configure standard kernel features (expert users)"
 	help
@@ -1081,6 +1087,15 @@
 	  option replaces shmem and tmpfs with the much simpler ramfs code,
 	  which may be appropriate on small systems without swap.
 
+config ASHMEM
+	bool "Enable the Anonymous Shared Memory Subsystem"
+	default n
+	depends on SHMEM || TINY_SHMEM
+	help
+	  The ashmem subsystem is a new shared memory allocator, similar to
+	  POSIX SHM but with different behavior and sporting a simpler
+	  file-based API.
+
 config AIO
 	bool "Enable AIO support" if EXPERT
 	default y
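For reference, the "simpler file-based API" mentioned in the ASHMEM help text is driven through /dev/ashmem. The device node, the <linux/ashmem.h> header and the ASHMEM_SET_NAME/ASHMEM_SET_SIZE ioctls used below come from the ashmem driver itself, not from this Kconfig entry, so treat this user-space sketch as illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>	/* provided by the ashmem driver, not by this Kconfig change */

int main(void)
{
	char name[ASHMEM_NAME_LEN] = "example-region";
	size_t size = 4096;
	void *map;
	int fd = open("/dev/ashmem", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ashmem");
		return 1;
	}
	ioctl(fd, ASHMEM_SET_NAME, name);
	ioctl(fd, ASHMEM_SET_SIZE, size);
	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* The fd can be passed to another process, which maps the same region. */
	munmap(map, size);
	close(fd);
	return 0;
}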
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2731d11..01ab75d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -58,6 +58,7 @@
 #include <linux/eventfd.h>
 #include <linux/poll.h>
 #include <linux/flex_array.h> /* used in cgroup_attach_proc */
+#include <linux/capability.h>
 
 #include <asm/atomic.h>
 
@@ -268,6 +269,33 @@
 static DECLARE_WORK(release_agent_work, cgroup_release_agent);
 static void check_for_release(struct cgroup *cgrp);
 
+/*
+ * A queue for waiters doing rmdir() on a cgroup. A task will sleep here when
+ * cgroup->count == 0 && list_empty(&cgroup->children) && a subsystem still
+ * holds a reference to css->refcnt. In general, this refcnt is expected to
+ * drop to zero soon.
+ *
+ * The CGRP_WAIT_ON_RMDIR flag is set under the cgroup's inode->i_mutex.
+ */
+DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+
+static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
+{
+	if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+		wake_up_all(&cgroup_rmdir_waitq);
+}
+
+void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
+{
+	css_get(css);
+}
+
+void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
+{
+	cgroup_wakeup_rmdir_waiter(css->cgroup);
+	css_put(css);
+}
+
 /* Link structure for associating css_set objects with cgroups */
 struct cg_cgroup_link {
 	/*
@@ -327,16 +355,53 @@
 	return &css_set_table[index];
 }
 
+static void free_css_set_work(struct work_struct *work)
+{
+	struct css_set *cg = container_of(work, struct css_set, work);
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
+
+	write_lock(&css_set_lock);
+	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
+				 cg_link_list) {
+		struct cgroup *cgrp = link->cgrp;
+		list_del(&link->cg_link_list);
+		list_del(&link->cgrp_link_list);
+		if (atomic_dec_and_test(&cgrp->count)) {
+			check_for_release(cgrp);
+			cgroup_wakeup_rmdir_waiter(cgrp);
+		}
+		kfree(link);
+	}
+	write_unlock(&css_set_lock);
+
+	kfree(cg);
+}
+
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+	struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+
+	INIT_WORK(&cg->work, free_css_set_work);
+	schedule_work(&cg->work);
+}
+
 /* We don't maintain the lists running through each css_set to its
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
  * compiled into their kernel but not actually in use */
 static int use_task_css_set_links __read_mostly;
 
-static void __put_css_set(struct css_set *cg, int taskexit)
+/*
+ * refcounted get/put for css_set objects
+ */
+static inline void get_css_set(struct css_set *cg)
 {
-	struct cg_cgroup_link *link;
-	struct cg_cgroup_link *saved_link;
+	atomic_inc(&cg->refcount);
+}
+
+static void put_css_set(struct css_set *cg)
+{
 	/*
 	 * Ensure that the refcount doesn't hit zero while any readers
 	 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -350,45 +415,11 @@
 		return;
 	}
 
-	/* This css_set is dead. unlink it and release cgroup refcounts */
 	hlist_del(&cg->hlist);
 	css_set_count--;
 
-	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
-				 cg_link_list) {
-		struct cgroup *cgrp = link->cgrp;
-		list_del(&link->cg_link_list);
-		list_del(&link->cgrp_link_list);
-		if (atomic_dec_and_test(&cgrp->count) &&
-		    notify_on_release(cgrp)) {
-			if (taskexit)
-				set_bit(CGRP_RELEASABLE, &cgrp->flags);
-			check_for_release(cgrp);
-		}
-
-		kfree(link);
-	}
-
 	write_unlock(&css_set_lock);
-	kfree_rcu(cg, rcu_head);
-}
-
-/*
- * refcounted get/put for css_set objects
- */
-static inline void get_css_set(struct css_set *cg)
-{
-	atomic_inc(&cg->refcount);
-}
-
-static inline void put_css_set(struct css_set *cg)
-{
-	__put_css_set(cg, 0);
-}
-
-static inline void put_css_set_taskexit(struct css_set *cg)
-{
-	__put_css_set(cg, 1);
+	call_rcu(&cg->rcu_head, free_css_set_rcu);
 }
 
 /*
@@ -720,9 +751,9 @@
  * cgroup_attach_task(), which overwrites one tasks cgroup pointer with
  * another.  It does so using cgroup_mutex, however there are
  * several performance critical places that need to reference
- * task->cgroup without the expense of grabbing a system global
+ * task->cgroups without the expense of grabbing a system global
  * mutex.  Therefore except as noted below, when dereferencing or, as
- * in cgroup_attach_task(), modifying a task'ss cgroup pointer we use
+ * in cgroup_attach_task(), modifying a task's cgroups pointer we use
  * task_lock(), which acts on a spinlock (task->alloc_lock) already in
  * the task_struct routinely used for such matters.
  *
@@ -912,33 +943,6 @@
 }
 
 /*
- * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
- * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
- * reference to css->refcnt. In general, this refcnt is expected to goes down
- * to zero, soon.
- *
- * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
- */
-DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
-
-static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
-{
-	if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
-		wake_up_all(&cgroup_rmdir_waitq);
-}
-
-void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
-{
-	css_get(css);
-}
-
-void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
-{
-	cgroup_wakeup_rmdir_waiter(css->cgroup);
-	css_put(css);
-}
-
-/*
  * Call with cgroup_mutex held. Drops reference counts on modules, including
  * any duplicate ones that parse_cgroupfs_options took. If this function
  * returns an error, no reference counts are touched.
@@ -1820,6 +1824,7 @@
 	struct cgroup_subsys *ss, *failed_ss = NULL;
 	struct cgroup *oldcgrp;
 	struct cgroupfs_root *root = cgrp->root;
+	struct css_set *cg;
 
 	/* Nothing to do if the task is already in that cgroup */
 	oldcgrp = task_cgroup_from_root(tsk, root);
@@ -1839,6 +1844,15 @@
 				failed_ss = ss;
 				goto out;
 			}
+		} else if (!capable(CAP_SYS_ADMIN)) {
+			const struct cred *cred = current_cred(), *tcred;
+
+			/* No can_attach() - check perms generically */
+			tcred = __task_cred(tsk);
+			if (cred->euid != tcred->uid &&
+			    cred->euid != tcred->suid) {
+				return -EACCES;
+			}
 		}
 		if (ss->can_attach_task) {
 			retval = ss->can_attach_task(cgrp, tsk);
@@ -1849,6 +1863,11 @@
 		}
 	}
 
+	task_lock(tsk);
+	cg = tsk->cgroups;
+	get_css_set(cg);
+	task_unlock(tsk);
+
 	retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
 	if (retval)
 		goto out;
@@ -1861,8 +1880,9 @@
 		if (ss->attach)
 			ss->attach(ss, cgrp, oldcgrp, tsk);
 	}
-
-	synchronize_rcu();
+	set_bit(CGRP_RELEASABLE, &cgrp->flags);
+	/* put_css_set will not destroy cg until after an RCU grace period */
+	put_css_set(cg);
 
 	/*
 	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
@@ -3810,6 +3830,8 @@
 	if (err < 0)
 		goto err_remove;
 
+	set_bit(CGRP_RELEASABLE, &parent->flags);
+
 	/* The cgroup directory was pre-locked for us */
 	BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
 
@@ -3941,6 +3963,21 @@
 	return !failed;
 }
 
+/* checks if all of the css_sets attached to a cgroup have a refcount of 0.
+ * Must be called with css_set_lock held */
+static int cgroup_css_sets_empty(struct cgroup *cgrp)
+{
+	struct cg_cgroup_link *link;
+
+	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
+		struct css_set *cg = link->cg;
+		if (atomic_read(&cg->refcount) > 0)
+			return 0;
+	}
+
+	return 1;
+}
+
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 {
 	struct cgroup *cgrp = dentry->d_fsdata;
@@ -3953,7 +3990,7 @@
 	/* the vfs holds both inode->i_mutex already */
 again:
 	mutex_lock(&cgroup_mutex);
-	if (atomic_read(&cgrp->count) != 0) {
+	if (!cgroup_css_sets_empty(cgrp)) {
 		mutex_unlock(&cgroup_mutex);
 		return -EBUSY;
 	}
@@ -3986,7 +4023,7 @@
 
 	mutex_lock(&cgroup_mutex);
 	parent = cgrp->parent;
-	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
+	if (!cgroup_css_sets_empty(cgrp) || !list_empty(&cgrp->children)) {
 		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 		mutex_unlock(&cgroup_mutex);
 		return -EBUSY;
@@ -4026,7 +4063,6 @@
 	cgroup_d_remove_dir(d);
 	dput(d);
 
-	set_bit(CGRP_RELEASABLE, &parent->flags);
 	check_for_release(parent);
 
 	/*
@@ -4626,7 +4662,7 @@
 	task_unlock(tsk);
 
 	if (cg)
-		put_css_set_taskexit(cg);
+		put_css_set(cg);
 }
 
 /**
@@ -4680,6 +4716,14 @@
 }
 
 /* Caller must verify that the css is not for root cgroup */
+void __css_get(struct cgroup_subsys_state *css, int count)
+{
+	atomic_add(count, &css->refcnt);
+	set_bit(CGRP_RELEASABLE, &css->cgroup->flags);
+}
+EXPORT_SYMBOL_GPL(__css_get);
+
+/* Caller must verify that the css is not for root cgroup */
 void __css_put(struct cgroup_subsys_state *css, int count)
 {
 	struct cgroup *cgrp = css->cgroup;
@@ -4687,10 +4731,7 @@
 	rcu_read_lock();
 	val = atomic_sub_return(count, &css->refcnt);
 	if (val == 1) {
-		if (notify_on_release(cgrp)) {
-			set_bit(CGRP_RELEASABLE, &cgrp->flags);
-			check_for_release(cgrp);
-		}
+		check_for_release(cgrp);
 		cgroup_wakeup_rmdir_waiter(cgrp);
 	}
 	rcu_read_unlock();
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e691818..6ebda1d 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -164,6 +164,14 @@
 {
 	struct freezer *freezer;
 
+	if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
+		const struct cred *cred = current_cred(), *tcred;
+
+		tcred = __task_cred(task);
+		if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+			return -EPERM;
+	}
+
 	/*
 	 * Anything frozen can't move or be moved to/from.
 	 */
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9c9b754..a1df487 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1373,6 +1373,13 @@
 {
 	struct cpuset *cs = cgroup_cs(cont);
 
+	if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
+		const struct cred *cred = current_cred(), *tcred;
+		tcred = __task_cred(task);
+		if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+			return -EPERM;
+	}
+
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 0276c30..4e4b344 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -154,6 +154,9 @@
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+/* Notifier list called when a task struct is freed */
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+
 static void account_kernel_stack(struct thread_info *ti, int account)
 {
 	struct zone *zone = page_zone(virt_to_page(ti));
@@ -185,6 +188,18 @@
 		free_signal_struct(sig);
 }
 
+int task_free_register(struct notifier_block *n)
+{
+	return atomic_notifier_chain_register(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_register);
+
+int task_free_unregister(struct notifier_block *n)
+{
+	return atomic_notifier_chain_unregister(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_unregister);
+
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
@@ -195,6 +210,7 @@
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);
 
+	atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
 }
@@ -1013,7 +1029,7 @@
 {
 	raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
+	plist_head_init(&p->pi_waiters);
 	p->pi_blocked_on = NULL;
 #endif
 }
diff --git a/kernel/futex.c b/kernel/futex.c
index fe28dc2..3fbc76c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2697,7 +2697,7 @@
 		futex_cmpxchg_enabled = 1;
 
 	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
-		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
+		plist_head_init(&futex_queues[i].chain);
 		spin_lock_init(&futex_queues[i].lock);
 	}
 
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index f76fc00..85c1308 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -70,8 +70,11 @@
 
 	for_each_irq_desc(irq, desc) {
 		if (irqd_is_wakeup_set(&desc->irq_data)) {
-			if (desc->istate & IRQS_PENDING)
+			if (desc->istate & IRQS_PENDING) {
+				pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
+					irq, desc->name ? desc->name : "");
 				return -EBUSY;
+			}
 			continue;
 		}
 		/*
diff --git a/kernel/panic.c b/kernel/panic.c
index 6923167..5578d0a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -33,7 +33,10 @@
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
-int panic_timeout;
+#ifndef CONFIG_PANIC_TIMEOUT
+#define CONFIG_PANIC_TIMEOUT 0
+#endif
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
 EXPORT_SYMBOL_GPL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 6824ca7..37f05d0 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -74,7 +74,7 @@
 static struct pm_qos_object null_pm_qos;
 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
 static struct pm_qos_object cpu_dma_pm_qos = {
-	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
 	.notifiers = &cpu_dma_lat_notifier,
 	.name = "cpu_dma_latency",
 	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
@@ -84,7 +84,7 @@
 
 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
 static struct pm_qos_object network_lat_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
 	.notifiers = &network_lat_notifier,
 	.name = "network_latency",
 	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
@@ -95,7 +95,7 @@
 
 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_object network_throughput_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
 	.notifiers = &network_throughput_notifier,
 	.name = "network_throughput",
 	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 87f4d24..a70f6c8 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,73 @@
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HAS_WAKELOCK
+	bool
+
+config HAS_EARLYSUSPEND
+	bool
+
+config WAKELOCK
+	bool "Wake lock"
+	depends on PM && RTC_CLASS
+	default n
+	select HAS_WAKELOCK
+	---help---
+	  Enable wake locks. When user space requests a sleep state, the
+	  sleep request will be delayed until no wake locks are held.
+
+config WAKELOCK_STAT
+	bool "Wake lock stats"
+	depends on WAKELOCK
+	default y
+	---help---
+	  Report wake lock stats in /proc/wakelocks
+
+config USER_WAKELOCK
+	bool "Userspace wake locks"
+	depends on WAKELOCK
+	default y
+	---help---
+	  User-space wake lock API. Write "lockname" or "lockname timeout"
+	  to /sys/power/wake_lock to acquire (and, if needed, create) a wake
+	  lock. Write "lockname" to /sys/power/wake_unlock to release a user
+	  wake lock.
+
+config EARLYSUSPEND
+	bool "Early suspend"
+	depends on WAKELOCK
+	default y
+	select HAS_EARLYSUSPEND
+	---help---
+	  Call early suspend handlers when the user-requested sleep state
+	  changes.
+
+choice
+	prompt "User-space screen access"
+	default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE
+	default CONSOLE_EARLYSUSPEND
+	depends on HAS_EARLYSUSPEND
+
+	config NO_USER_SPACE_SCREEN_ACCESS_CONTROL
+		bool "None"
+
+	config CONSOLE_EARLYSUSPEND
+		bool "Console switch on early-suspend"
+		depends on HAS_EARLYSUSPEND && VT
+		---help---
+		  Register an early suspend handler that performs a console
+		  switch when user space should stop drawing to the screen,
+		  and switches back when it should resume.
+
+	config FB_EARLYSUSPEND
+		bool "Sysfs interface"
+		depends on HAS_EARLYSUSPEND
+		---help---
+		  Register an early suspend handler that notifies user space
+		  through sysfs when it should stop drawing to the screen,
+		  waits for it to do so, and notifies it again when it may
+		  resume drawing.
+endchoice
+
 config HIBERNATE_CALLBACKS
 	bool
 
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c5ebc6a..493f19d 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -8,5 +8,10 @@
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
+obj-$(CONFIG_WAKELOCK)		+= wakelock.o
+obj-$(CONFIG_USER_WAKELOCK)	+= userwakelock.o
+obj-$(CONFIG_EARLYSUSPEND)	+= earlysuspend.o
+obj-$(CONFIG_CONSOLE_EARLYSUSPEND)	+= consoleearlysuspend.o
+obj-$(CONFIG_FB_EARLYSUSPEND)	+= fbearlysuspend.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c
new file mode 100644
index 0000000..a3edcb2
--- /dev/null
+++ b/kernel/power/consoleearlysuspend.c
@@ -0,0 +1,78 @@
+/* kernel/power/consoleearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/earlysuspend.h>
+#include <linux/kbd_kern.h>
+#include <linux/module.h>
+#include <linux/vt_kern.h>
+#include <linux/wait.h>
+
+#define EARLY_SUSPEND_CONSOLE	(MAX_NR_CONSOLES-1)
+
+static int orig_fgconsole;
+static void console_early_suspend(struct early_suspend *h)
+{
+	acquire_console_sem();
+	orig_fgconsole = fg_console;
+	if (vc_allocate(EARLY_SUSPEND_CONSOLE))
+		goto err;
+	if (set_console(EARLY_SUSPEND_CONSOLE))
+		goto err;
+	release_console_sem();
+
+	if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1))
+		pr_warning("console_early_suspend: Can't switch VCs.\n");
+	return;
+err:
+	pr_warning("console_early_suspend: Can't set console\n");
+	release_console_sem();
+}
+
+static void console_late_resume(struct early_suspend *h)
+{
+	int ret;
+	acquire_console_sem();
+	ret = set_console(orig_fgconsole);
+	release_console_sem();
+	if (ret) {
+		pr_warning("console_late_resume: Can't set console.\n");
+		return;
+	}
+
+	if (vt_waitactive(orig_fgconsole + 1))
+		pr_warning("console_late_resume: Can't switch VCs.\n");
+}
+
+static struct early_suspend console_early_suspend_desc = {
+	.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+	.suspend = console_early_suspend,
+	.resume = console_late_resume,
+};
+
+static int __init console_early_suspend_init(void)
+{
+	register_early_suspend(&console_early_suspend_desc);
+	return 0;
+}
+
+static void  __exit console_early_suspend_exit(void)
+{
+	unregister_early_suspend(&console_early_suspend_desc);
+}
+
+module_init(console_early_suspend_init);
+module_exit(console_early_suspend_exit);
+
diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c
new file mode 100644
index 0000000..84bed51
--- /dev/null
+++ b/kernel/power/earlysuspend.c
@@ -0,0 +1,178 @@
+/* kernel/power/earlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/syscalls.h> /* sys_sync */
+#include <linux/wakelock.h>
+#include <linux/workqueue.h>
+
+#include "power.h"
+
+enum {
+	DEBUG_USER_STATE = 1U << 0,
+	DEBUG_SUSPEND = 1U << 2,
+};
+static int debug_mask = DEBUG_USER_STATE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(early_suspend_lock);
+static LIST_HEAD(early_suspend_handlers);
+static void early_suspend(struct work_struct *work);
+static void late_resume(struct work_struct *work);
+static DECLARE_WORK(early_suspend_work, early_suspend);
+static DECLARE_WORK(late_resume_work, late_resume);
+static DEFINE_SPINLOCK(state_lock);
+enum {
+	SUSPEND_REQUESTED = 0x1,
+	SUSPENDED = 0x2,
+	SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED,
+};
+static int state;
+
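+/*
+ * Handlers are kept sorted by ->level: suspend handlers run from the lowest
+ * level to the highest, and resume handlers run in the reverse order.  If a
+ * handler is registered while we are already suspended, its suspend callback
+ * is invoked immediately so it does not miss the transition.
+ */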
+void register_early_suspend(struct early_suspend *handler)
+{
+	struct list_head *pos;
+
+	mutex_lock(&early_suspend_lock);
+	list_for_each(pos, &early_suspend_handlers) {
+		struct early_suspend *e;
+		e = list_entry(pos, struct early_suspend, link);
+		if (e->level > handler->level)
+			break;
+	}
+	list_add_tail(&handler->link, pos);
+	if ((state & SUSPENDED) && handler->suspend)
+		handler->suspend(handler);
+	mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(register_early_suspend);
+
+void unregister_early_suspend(struct early_suspend *handler)
+{
+	mutex_lock(&early_suspend_lock);
+	list_del(&handler->link);
+	mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(unregister_early_suspend);
+
+static void early_suspend(struct work_struct *work)
+{
+	struct early_suspend *pos;
+	unsigned long irqflags;
+	int abort = 0;
+
+	mutex_lock(&early_suspend_lock);
+	spin_lock_irqsave(&state_lock, irqflags);
+	if (state == SUSPEND_REQUESTED)
+		state |= SUSPENDED;
+	else
+		abort = 1;
+	spin_unlock_irqrestore(&state_lock, irqflags);
+
+	if (abort) {
+		if (debug_mask & DEBUG_SUSPEND)
+			pr_info("early_suspend: abort, state %d\n", state);
+		mutex_unlock(&early_suspend_lock);
+		goto abort;
+	}
+
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("early_suspend: call handlers\n");
+	list_for_each_entry(pos, &early_suspend_handlers, link) {
+		if (pos->suspend != NULL)
+			pos->suspend(pos);
+	}
+	mutex_unlock(&early_suspend_lock);
+
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("early_suspend: sync\n");
+
+	sys_sync();
+abort:
+	spin_lock_irqsave(&state_lock, irqflags);
+	if (state == SUSPEND_REQUESTED_AND_SUSPENDED)
+		wake_unlock(&main_wake_lock);
+	spin_unlock_irqrestore(&state_lock, irqflags);
+}
+
+static void late_resume(struct work_struct *work)
+{
+	struct early_suspend *pos;
+	unsigned long irqflags;
+	int abort = 0;
+
+	mutex_lock(&early_suspend_lock);
+	spin_lock_irqsave(&state_lock, irqflags);
+	if (state == SUSPENDED)
+		state &= ~SUSPENDED;
+	else
+		abort = 1;
+	spin_unlock_irqrestore(&state_lock, irqflags);
+
+	if (abort) {
+		if (debug_mask & DEBUG_SUSPEND)
+			pr_info("late_resume: abort, state %d\n", state);
+		goto abort;
+	}
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("late_resume: call handlers\n");
+	list_for_each_entry_reverse(pos, &early_suspend_handlers, link)
+		if (pos->resume != NULL)
+			pos->resume(pos);
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("late_resume: done\n");
+abort:
+	mutex_unlock(&early_suspend_lock);
+}
+
+void request_suspend_state(suspend_state_t new_state)
+{
+	unsigned long irqflags;
+	int old_sleep;
+
+	spin_lock_irqsave(&state_lock, irqflags);
+	old_sleep = state & SUSPEND_REQUESTED;
+	if (debug_mask & DEBUG_USER_STATE) {
+		struct timespec ts;
+		struct rtc_time tm;
+		getnstimeofday(&ts);
+		rtc_time_to_tm(ts.tv_sec, &tm);
+		pr_info("request_suspend_state: %s (%d->%d) at %lld "
+			"(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
+			new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
+			requested_suspend_state, new_state,
+			ktime_to_ns(ktime_get()),
+			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+	}
+	if (!old_sleep && new_state != PM_SUSPEND_ON) {
+		state |= SUSPEND_REQUESTED;
+		queue_work(suspend_work_queue, &early_suspend_work);
+	} else if (old_sleep && new_state == PM_SUSPEND_ON) {
+		state &= ~SUSPEND_REQUESTED;
+		wake_lock(&main_wake_lock);
+		queue_work(suspend_work_queue, &late_resume_work);
+	}
+	requested_suspend_state = new_state;
+	spin_unlock_irqrestore(&state_lock, irqflags);
+}
+
+suspend_state_t get_suspend_state(void)
+{
+	return requested_suspend_state;
+}
diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c
new file mode 100644
index 0000000..1513765
--- /dev/null
+++ b/kernel/power/fbearlysuspend.c
@@ -0,0 +1,153 @@
+/* kernel/power/fbearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+
+#include "power.h"
+
+static wait_queue_head_t fb_state_wq;
+static DEFINE_SPINLOCK(fb_state_lock);
+static enum {
+	FB_STATE_STOPPED_DRAWING,
+	FB_STATE_REQUEST_STOP_DRAWING,
+	FB_STATE_DRAWING_OK,
+} fb_state;
+
+/* tell userspace to stop drawing, wait for it to stop */
+static void stop_drawing_early_suspend(struct early_suspend *h)
+{
+	int ret;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fb_state_lock, irq_flags);
+	fb_state = FB_STATE_REQUEST_STOP_DRAWING;
+	spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+	wake_up_all(&fb_state_wq);
+	ret = wait_event_timeout(fb_state_wq,
+				 fb_state == FB_STATE_STOPPED_DRAWING,
+				 HZ);
+	if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
+		pr_warning("stop_drawing_early_suspend: timeout waiting for "
+			   "userspace to stop drawing\n");
+}
+
+/* tell userspace to start drawing */
+static void start_drawing_late_resume(struct early_suspend *h)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fb_state_lock, irq_flags);
+	fb_state = FB_STATE_DRAWING_OK;
+	spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+	wake_up(&fb_state_wq);
+}
+
+static struct early_suspend stop_drawing_early_suspend_desc = {
+	.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+	.suspend = stop_drawing_early_suspend,
+	.resume = start_drawing_late_resume,
+};
+
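+/*
+ * Sysfs handshake with user space: a read of wait_for_fb_sleep blocks until
+ * the kernel wants drawing to stop, while a read of wait_for_fb_wake first
+ * acknowledges the stop request (fb_state -> STOPPED_DRAWING) and then
+ * blocks until drawing may resume.
+ */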
+static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	int ret;
+
+	ret = wait_event_interruptible(fb_state_wq,
+				       fb_state != FB_STATE_DRAWING_OK);
+	if (ret && fb_state == FB_STATE_DRAWING_OK)
+		return ret;
+	else
+		s += sprintf(buf, "sleeping");
+	return s - buf;
+}
+
+static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	int ret;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fb_state_lock, irq_flags);
+	if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
+		fb_state = FB_STATE_STOPPED_DRAWING;
+		wake_up(&fb_state_wq);
+	}
+	spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+	ret = wait_event_interruptible(fb_state_wq,
+				       fb_state == FB_STATE_DRAWING_OK);
+	if (ret && fb_state != FB_STATE_DRAWING_OK)
+		return ret;
+	else
+		s += sprintf(buf, "awake");
+
+	return s - buf;
+}
+
+#define power_ro_attr(_name) \
+static struct kobj_attribute _name##_attr = {	\
+	.attr	= {				\
+		.name = __stringify(_name),	\
+		.mode = 0444,			\
+	},					\
+	.show	= _name##_show,			\
+	.store	= NULL,		\
+}
+
+power_ro_attr(wait_for_fb_sleep);
+power_ro_attr(wait_for_fb_wake);
+
+static struct attribute *g[] = {
+	&wait_for_fb_sleep_attr.attr,
+	&wait_for_fb_wake_attr.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = g,
+};
+
+static int __init android_power_init(void)
+{
+	int ret;
+
+	init_waitqueue_head(&fb_state_wq);
+	fb_state = FB_STATE_DRAWING_OK;
+
+	ret = sysfs_create_group(power_kobj, &attr_group);
+	if (ret) {
+		pr_err("android_power_init: sysfs_create_group failed\n");
+		return ret;
+	}
+
+	register_early_suspend(&stop_drawing_early_suspend_desc);
+	return 0;
+}
+
+static void  __exit android_power_exit(void)
+{
+	unregister_early_suspend(&stop_drawing_early_suspend_desc);
+	sysfs_remove_group(power_kobj, &attr_group);
+}
+
+module_init(android_power_init);
+module_exit(android_power_exit);
+
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2981af4..ff29679 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -170,7 +170,11 @@
 			   const char *buf, size_t n)
 {
 #ifdef CONFIG_SUSPEND
+#ifdef CONFIG_EARLYSUSPEND
+	suspend_state_t state = PM_SUSPEND_ON;
+#else
 	suspend_state_t state = PM_SUSPEND_STANDBY;
+#endif
 	const char * const *s;
 #endif
 	char *p;
@@ -192,8 +196,15 @@
 			break;
 	}
 	if (state < PM_SUSPEND_MAX && *s)
+#ifdef CONFIG_EARLYSUSPEND
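+		/*
+		 * With early suspend, writing a state only records the
+		 * request; the actual transition is driven by wake locks
+		 * via request_suspend_state().
+		 */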
+		if (state == PM_SUSPEND_ON || valid_state(state)) {
+			error = 0;
+			request_suspend_state(state);
+		}
+#else
 		error = enter_state(state);
 #endif
+#endif
 
  Exit:
 	return error ? error : n;
@@ -297,6 +308,11 @@
 
 #endif /* CONFIG_PM_TRACE */
 
+#ifdef CONFIG_USER_WAKELOCK
+power_attr(wake_lock);
+power_attr(wake_unlock);
+#endif
+
 static struct attribute * g[] = {
 	&state_attr.attr,
 #ifdef CONFIG_PM_TRACE
@@ -309,6 +325,10 @@
 #ifdef CONFIG_PM_DEBUG
 	&pm_test_attr.attr,
 #endif
+#ifdef CONFIG_USER_WAKELOCK
+	&wake_lock_attr.attr,
+	&wake_unlock_attr.attr,
+#endif
 #endif
 	NULL,
 };
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 9a00a0a..b6b9006 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -245,3 +245,27 @@
 {
 }
 #endif
+
+#ifdef CONFIG_WAKELOCK
+/* kernel/power/wakelock.c */
+extern struct workqueue_struct *suspend_work_queue;
+extern struct wake_lock main_wake_lock;
+extern suspend_state_t requested_suspend_state;
+#endif
+
+#ifdef CONFIG_USER_WAKELOCK
+ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf);
+ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n);
+ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf);
+ssize_t  wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n);
+#endif
+
+#ifdef CONFIG_EARLYSUSPEND
+/* kernel/power/earlysuspend.c */
+void request_suspend_state(suspend_state_t state);
+suspend_state_t get_suspend_state(void);
+#endif
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0cf3a27..31338cd 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
 #include <linux/freezer.h>
 #include <linux/delay.h>
 #include <linux/workqueue.h>
+#include <linux/wakelock.h>
 
 /* 
  * Timeout for stopping processes
@@ -82,6 +83,10 @@
 			todo += wq_busy;
 		}
 
+		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
+			wakeup = 1;
+			break;
+		}
 		if (!todo || time_after(jiffies, end_time))
 			break;
 
@@ -108,19 +113,25 @@
 		 * and caller must call thaw_processes() if something fails),
 		 * but it cleans up leftover PF_FREEZE requests.
 		 */
-		printk("\n");
-		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
-		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
-		       wakeup ? "aborted" : "failed",
-		       elapsed_csecs / 100, elapsed_csecs % 100,
-		       todo - wq_busy, wq_busy);
-
+		if (wakeup) {
+			printk("\n");
+			printk(KERN_ERR "Freezing of %s aborted\n",
+			       sig_only ? "user space" : "tasks");
+		} else {
+			printk("\n");
+			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
+			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
+			       elapsed_csecs / 100, elapsed_csecs % 100,
+			       todo - wq_busy, wq_busy);
+		}
 		thaw_workqueues();
 
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			task_lock(p);
-			if (!wakeup && freezing(p) && !freezer_should_skip(p))
+			if (freezing(p) && !freezer_should_skip(p) &&
+				elapsed_csecs > 100)
 				sched_show_task(p);
 			cancel_freezing(p);
 			task_unlock(p);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1c41ba2..63774df 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -28,6 +28,9 @@
 #include "power.h"
 
 const char *const pm_states[PM_SUSPEND_MAX] = {
+#ifdef CONFIG_EARLYSUSPEND
+	[PM_SUSPEND_ON]		= "on",
+#endif
 	[PM_SUSPEND_STANDBY]	= "standby",
 	[PM_SUSPEND_MEM]	= "mem",
 };
diff --git a/kernel/power/userwakelock.c b/kernel/power/userwakelock.c
new file mode 100644
index 0000000..a28a8db
--- /dev/null
+++ b/kernel/power/userwakelock.c
@@ -0,0 +1,219 @@
+/* kernel/power/userwakelock.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/wakelock.h>
+#include <linux/slab.h>
+
+#include "power.h"
+
+enum {
+	DEBUG_FAILURE	= BIT(0),
+	DEBUG_ERROR	= BIT(1),
+	DEBUG_NEW	= BIT(2),
+	DEBUG_ACCESS	= BIT(3),
+	DEBUG_LOOKUP	= BIT(4),
+};
+static int debug_mask = DEBUG_FAILURE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(tree_lock);
+
+struct user_wake_lock {
+	struct rb_node		node;
+	struct wake_lock	wake_lock;
+	char			name[0];
+};
+struct rb_root user_wake_locks;
+
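+/*
+ * Parse "name" or "name <timeout-in-ns>" from @buf and return the matching
+ * rb-tree entry, creating one when @allocate is set.  When a timeout is
+ * given, *timeoutptr is set to that value converted to jiffies (minimum 1);
+ * otherwise it is cleared.
+ */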
+static struct user_wake_lock *lookup_wake_lock_name(
+	const char *buf, int allocate, long *timeoutptr)
+{
+	struct rb_node **p = &user_wake_locks.rb_node;
+	struct rb_node *parent = NULL;
+	struct user_wake_lock *l;
+	int diff;
+	u64 timeout;
+	int name_len;
+	const char *arg;
+
+	/* Find length of lock name and start of optional timeout string */
+	arg = buf;
+	while (*arg && !isspace(*arg))
+		arg++;
+	name_len = arg - buf;
+	if (!name_len)
+		goto bad_arg;
+	while (isspace(*arg))
+		arg++;
+
+	/* Process timeout string */
+	if (timeoutptr && *arg) {
+		timeout = simple_strtoull(arg, (char **)&arg, 0);
+		while (isspace(*arg))
+			arg++;
+		if (*arg)
+			goto bad_arg;
+		/* convert timeout from nanoseconds to jiffies > 0 */
+		timeout += (NSEC_PER_SEC / HZ) - 1;
+		do_div(timeout, (NSEC_PER_SEC / HZ));
+		if (timeout <= 0)
+			timeout = 1;
+		*timeoutptr = timeout;
+	} else if (*arg)
+		goto bad_arg;
+	else if (timeoutptr)
+		*timeoutptr = 0;
+
+	/* Lookup wake lock in rbtree */
+	while (*p) {
+		parent = *p;
+		l = rb_entry(parent, struct user_wake_lock, node);
+		diff = strncmp(buf, l->name, name_len);
+		if (!diff && l->name[name_len])
+			diff = -1;
+		if (debug_mask & DEBUG_ERROR)
+			pr_info("lookup_wake_lock_name: compare %.*s %s %d\n",
+				name_len, buf, l->name, diff);
+
+		if (diff < 0)
+			p = &(*p)->rb_left;
+		else if (diff > 0)
+			p = &(*p)->rb_right;
+		else
+			return l;
+	}
+
+	/* Allocate and add new wakelock to rbtree */
+	if (!allocate) {
+		if (debug_mask & DEBUG_ERROR)
+			pr_info("lookup_wake_lock_name: %.*s not found\n",
+				name_len, buf);
+		return ERR_PTR(-EINVAL);
+	}
+	l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL);
+	if (l == NULL) {
+		if (debug_mask & DEBUG_FAILURE)
+			pr_err("lookup_wake_lock_name: failed to allocate "
+				"memory for %.*s\n", name_len, buf);
+		return ERR_PTR(-ENOMEM);
+	}
+	memcpy(l->name, buf, name_len);
+	if (debug_mask & DEBUG_NEW)
+		pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name);
+	wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name);
+	rb_link_node(&l->node, parent, p);
+	rb_insert_color(&l->node, &user_wake_locks);
+	return l;
+
+bad_arg:
+	if (debug_mask & DEBUG_ERROR)
+		pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n",
+			name_len, buf, arg);
+	return ERR_PTR(-EINVAL);
+}
+
+ssize_t wake_lock_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	char *end = buf + PAGE_SIZE;
+	struct rb_node *n;
+	struct user_wake_lock *l;
+
+	mutex_lock(&tree_lock);
+
+	for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+		l = rb_entry(n, struct user_wake_lock, node);
+		if (wake_lock_active(&l->wake_lock))
+			s += scnprintf(s, end - s, "%s ", l->name);
+	}
+	s += scnprintf(s, end - s, "\n");
+
+	mutex_unlock(&tree_lock);
+	return (s - buf);
+}
+
+ssize_t wake_lock_store(
+	struct kobject *kobj, struct kobj_attribute *attr,
+	const char *buf, size_t n)
+{
+	long timeout;
+	struct user_wake_lock *l;
+
+	mutex_lock(&tree_lock);
+	l = lookup_wake_lock_name(buf, 1, &timeout);
+	if (IS_ERR(l)) {
+		n = PTR_ERR(l);
+		goto bad_name;
+	}
+
+	if (debug_mask & DEBUG_ACCESS)
+		pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout);
+
+	if (timeout)
+		wake_lock_timeout(&l->wake_lock, timeout);
+	else
+		wake_lock(&l->wake_lock);
+bad_name:
+	mutex_unlock(&tree_lock);
+	return n;
+}
+
+
+ssize_t wake_unlock_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	char *end = buf + PAGE_SIZE;
+	struct rb_node *n;
+	struct user_wake_lock *l;
+
+	mutex_lock(&tree_lock);
+
+	for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+		l = rb_entry(n, struct user_wake_lock, node);
+		if (!wake_lock_active(&l->wake_lock))
+			s += scnprintf(s, end - s, "%s ", l->name);
+	}
+	s += scnprintf(s, end - s, "\n");
+
+	mutex_unlock(&tree_lock);
+	return (s - buf);
+}
+
+ssize_t wake_unlock_store(
+	struct kobject *kobj, struct kobj_attribute *attr,
+	const char *buf, size_t n)
+{
+	struct user_wake_lock *l;
+
+	mutex_lock(&tree_lock);
+	l = lookup_wake_lock_name(buf, 0, NULL);
+	if (IS_ERR(l)) {
+		n = PTR_ERR(l);
+		goto not_found;
+	}
+
+	if (debug_mask & DEBUG_ACCESS)
+		pr_info("wake_unlock_store: %s\n", l->name);
+
+	wake_unlock(&l->wake_lock);
+not_found:
+	mutex_unlock(&tree_lock);
+	return n;
+}
+
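
For reference, the interface added above is driven with plain writes from user space. A hedged sketch (the lock name is arbitrary; the timeout written to wake_lock is in nanoseconds, matching lookup_wake_lock_name() above):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static void hold_example_lock_for_1s(void)
	{
		int fd = open("/sys/power/wake_lock", O_WRONLY);

		if (fd < 0)
			return;
		/* "name timeout" creates the lock on first use */
		write(fd, "example_lock 1000000000",
		      strlen("example_lock 1000000000"));
		close(fd);
	}

	static void drop_example_lock(void)
	{
		int fd = open("/sys/power/wake_unlock", O_WRONLY);

		if (fd < 0)
			return;
		write(fd, "example_lock", strlen("example_lock"));
		close(fd);
	}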
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
new file mode 100644
index 0000000..c10d0ee
--- /dev/null
+++ b/kernel/power/wakelock.c
@@ -0,0 +1,603 @@
+/* kernel/power/wakelock.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/suspend.h>
+#include <linux/syscalls.h> /* sys_sync */
+#include <linux/wakelock.h>
+#ifdef CONFIG_WAKELOCK_STAT
+#include <linux/proc_fs.h>
+#endif
+#include "power.h"
+
+enum {
+	DEBUG_EXIT_SUSPEND = 1U << 0,
+	DEBUG_WAKEUP = 1U << 1,
+	DEBUG_SUSPEND = 1U << 2,
+	DEBUG_EXPIRE = 1U << 3,
+	DEBUG_WAKE_LOCK = 1U << 4,
+};
+static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define WAKE_LOCK_TYPE_MASK              (0x0f)
+#define WAKE_LOCK_INITIALIZED            (1U << 8)
+#define WAKE_LOCK_ACTIVE                 (1U << 9)
+#define WAKE_LOCK_AUTO_EXPIRE            (1U << 10)
+#define WAKE_LOCK_PREVENTING_SUSPEND     (1U << 11)
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(inactive_locks);
+static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT];
+static int current_event_num;
+struct workqueue_struct *suspend_work_queue;
+struct wake_lock main_wake_lock;
+suspend_state_t requested_suspend_state = PM_SUSPEND_MEM;
+static struct wake_lock unknown_wakeup;
+
+#ifdef CONFIG_WAKELOCK_STAT
+static struct wake_lock deleted_wake_locks;
+static ktime_t last_sleep_time_update;
+static int wait_for_wakeup;
+
+int get_expired_time(struct wake_lock *lock, ktime_t *expire_time)
+{
+	struct timespec ts;
+	struct timespec kt;
+	struct timespec tomono;
+	struct timespec delta;
+	struct timespec sleep;
+	long timeout;
+
+	if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE))
+		return 0;
+	get_xtime_and_monotonic_and_sleep_offset(&kt, &tomono, &sleep);
+	timeout = lock->expires - jiffies;
+	if (timeout > 0)
+		return 0;
+	jiffies_to_timespec(-timeout, &delta);
+	set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec,
+				kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec);
+	*expire_time = timespec_to_ktime(ts);
+	return 1;
+}
+
+
+static int print_lock_stat(struct seq_file *m, struct wake_lock *lock)
+{
+	int lock_count = lock->stat.count;
+	int expire_count = lock->stat.expire_count;
+	ktime_t active_time = ktime_set(0, 0);
+	ktime_t total_time = lock->stat.total_time;
+	ktime_t max_time = lock->stat.max_time;
+
+	ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time;
+	if (lock->flags & WAKE_LOCK_ACTIVE) {
+		ktime_t now, add_time;
+		int expired = get_expired_time(lock, &now);
+		if (!expired)
+			now = ktime_get();
+		add_time = ktime_sub(now, lock->stat.last_time);
+		lock_count++;
+		if (!expired)
+			active_time = add_time;
+		else
+			expire_count++;
+		total_time = ktime_add(total_time, add_time);
+		if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND)
+			prevent_suspend_time = ktime_add(prevent_suspend_time,
+					ktime_sub(now, last_sleep_time_update));
+		if (add_time.tv64 > max_time.tv64)
+			max_time = add_time;
+	}
+
+	return seq_printf(m,
+		     "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n",
+		     lock->name, lock_count, expire_count,
+		     lock->stat.wakeup_count, ktime_to_ns(active_time),
+		     ktime_to_ns(total_time),
+		     ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time),
+		     ktime_to_ns(lock->stat.last_time));
+}
+
+static int wakelock_stats_show(struct seq_file *m, void *unused)
+{
+	unsigned long irqflags;
+	struct wake_lock *lock;
+	int ret;
+	int type;
+
+	spin_lock_irqsave(&list_lock, irqflags);
+
+	ret = seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since"
+			"\ttotal_time\tsleep_time\tmax_time\tlast_change\n");
+	list_for_each_entry(lock, &inactive_locks, link)
+		ret = print_lock_stat(m, lock);
+	for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) {
+		list_for_each_entry(lock, &active_wake_locks[type], link)
+			ret = print_lock_stat(m, lock);
+	}
+	spin_unlock_irqrestore(&list_lock, irqflags);
+	return 0;
+}
+
+static void wake_unlock_stat_locked(struct wake_lock *lock, int expired)
+{
+	ktime_t duration;
+	ktime_t now;
+	if (!(lock->flags & WAKE_LOCK_ACTIVE))
+		return;
+	if (get_expired_time(lock, &now))
+		expired = 1;
+	else
+		now = ktime_get();
+	lock->stat.count++;
+	if (expired)
+		lock->stat.expire_count++;
+	duration = ktime_sub(now, lock->stat.last_time);
+	lock->stat.total_time = ktime_add(lock->stat.total_time, duration);
+	if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time))
+		lock->stat.max_time = duration;
+	lock->stat.last_time = ktime_get();
+	if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
+		duration = ktime_sub(now, last_sleep_time_update);
+		lock->stat.prevent_suspend_time = ktime_add(
+			lock->stat.prevent_suspend_time, duration);
+		lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
+	}
+}
+
+static void update_sleep_wait_stats_locked(int done)
+{
+	struct wake_lock *lock;
+	ktime_t now, etime, elapsed, add;
+	int expired;
+
+	now = ktime_get();
+	elapsed = ktime_sub(now, last_sleep_time_update);
+	list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) {
+		expired = get_expired_time(lock, &etime);
+		if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
+			if (expired)
+				add = ktime_sub(etime, last_sleep_time_update);
+			else
+				add = elapsed;
+			lock->stat.prevent_suspend_time = ktime_add(
+				lock->stat.prevent_suspend_time, add);
+		}
+		if (done || expired)
+			lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
+		else
+			lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND;
+	}
+	last_sleep_time_update = now;
+}
+#endif
+
+
+static void expire_wake_lock(struct wake_lock *lock)
+{
+#ifdef CONFIG_WAKELOCK_STAT
+	wake_unlock_stat_locked(lock, 1);
+#endif
+	lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
+	list_del(&lock->link);
+	list_add(&lock->link, &inactive_locks);
+	if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE))
+		pr_info("expired wake lock %s\n", lock->name);
+}
+
+/* Caller must acquire the list_lock spinlock */
+static void print_active_locks(int type)
+{
+	struct wake_lock *lock;
+	bool print_expired = true;
+
+	BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+	list_for_each_entry(lock, &active_wake_locks[type], link) {
+		if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
+			long timeout = lock->expires - jiffies;
+			if (timeout > 0)
+				pr_info("active wake lock %s, time left %ld\n",
+					lock->name, timeout);
+			else if (print_expired)
+				pr_info("wake lock %s, expired\n", lock->name);
+		} else {
+			pr_info("active wake lock %s\n", lock->name);
+			if (!(debug_mask & DEBUG_EXPIRE))
+				print_expired = false;
+		}
+	}
+}
+
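+/*
+ * Returns -1 if any lock of @type is held without a timeout, otherwise the
+ * longest remaining timeout (in jiffies) of the auto-expiring locks, or 0 if
+ * none are active.  Expired locks are retired as a side effect.
+ */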
+static long has_wake_lock_locked(int type)
+{
+	struct wake_lock *lock, *n;
+	long max_timeout = 0;
+
+	BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+	list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) {
+		if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
+			long timeout = lock->expires - jiffies;
+			if (timeout <= 0)
+				expire_wake_lock(lock);
+			else if (timeout > max_timeout)
+				max_timeout = timeout;
+		} else
+			return -1;
+	}
+	return max_timeout;
+}
+
+long has_wake_lock(int type)
+{
+	long ret;
+	unsigned long irqflags;
+	spin_lock_irqsave(&list_lock, irqflags);
+	ret = has_wake_lock_locked(type);
+	if (ret && (debug_mask & DEBUG_SUSPEND) && type == WAKE_LOCK_SUSPEND)
+		print_active_locks(type);
+	spin_unlock_irqrestore(&list_lock, irqflags);
+	return ret;
+}
+
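+/*
+ * Runs from suspend_work_queue once no wake lock is blocking suspend.  If
+ * pm_suspend() returns without any new suspend wake lock having been taken
+ * (no wakeup source claimed the event), hold a short timeout lock so we do
+ * not immediately loop back into suspend.
+ */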
+static void suspend(struct work_struct *work)
+{
+	int ret;
+	int entry_event_num;
+
+	if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
+		if (debug_mask & DEBUG_SUSPEND)
+			pr_info("suspend: abort suspend\n");
+		return;
+	}
+
+	entry_event_num = current_event_num;
+	sys_sync();
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("suspend: enter suspend\n");
+	ret = pm_suspend(requested_suspend_state);
+	if (debug_mask & DEBUG_EXIT_SUSPEND) {
+		struct timespec ts;
+		struct rtc_time tm;
+		getnstimeofday(&ts);
+		rtc_time_to_tm(ts.tv_sec, &tm);
+		pr_info("suspend: exit suspend, ret = %d "
+			"(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret,
+			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+	}
+	if (current_event_num == entry_event_num) {
+		if (debug_mask & DEBUG_SUSPEND)
+			pr_info("suspend: pm_suspend returned with no event\n");
+		wake_lock_timeout(&unknown_wakeup, HZ / 2);
+	}
+}
+static DECLARE_WORK(suspend_work, suspend);
+
+static void expire_wake_locks(unsigned long data)
+{
+	long has_lock;
+	unsigned long irqflags;
+	if (debug_mask & DEBUG_EXPIRE)
+		pr_info("expire_wake_locks: start\n");
+	spin_lock_irqsave(&list_lock, irqflags);
+	if (debug_mask & DEBUG_SUSPEND)
+		print_active_locks(WAKE_LOCK_SUSPEND);
+	has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND);
+	if (debug_mask & DEBUG_EXPIRE)
+		pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock);
+	if (has_lock == 0)
+		queue_work(suspend_work_queue, &suspend_work);
+	spin_unlock_irqrestore(&list_lock, irqflags);
+}
+static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0);
+
+static int power_suspend_late(struct device *dev)
+{
+	int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0;
+#ifdef CONFIG_WAKELOCK_STAT
+	wait_for_wakeup = 1;
+#endif
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("power_suspend_late return %d\n", ret);
+	return ret;
+}
+
+static struct dev_pm_ops power_driver_pm_ops = {
+	.suspend_noirq = power_suspend_late,
+};
+
+static struct platform_driver power_driver = {
+	.driver.name = "power",
+	.driver.pm = &power_driver_pm_ops,
+};
+static struct platform_device power_device = {
+	.name = "power",
+};
+
+void wake_lock_init(struct wake_lock *lock, int type, const char *name)
+{
+	unsigned long irqflags = 0;
+
+	if (name)
+		lock->name = name;
+	BUG_ON(!lock->name);
+
+	if (debug_mask & DEBUG_WAKE_LOCK)
+		pr_info("wake_lock_init name=%s\n", lock->name);
+#ifdef CONFIG_WAKELOCK_STAT
+	lock->stat.count = 0;
+	lock->stat.expire_count = 0;
+	lock->stat.wakeup_count = 0;
+	lock->stat.total_time = ktime_set(0, 0);
+	lock->stat.prevent_suspend_time = ktime_set(0, 0);
+	lock->stat.max_time = ktime_set(0, 0);
+	lock->stat.last_time = ktime_set(0, 0);
+#endif
+	lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED;
+
+	INIT_LIST_HEAD(&lock->link);
+	spin_lock_irqsave(&list_lock, irqflags);
+	list_add(&lock->link, &inactive_locks);
+	spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_lock_init);
+
+void wake_lock_destroy(struct wake_lock *lock)
+{
+	unsigned long irqflags;
+	if (debug_mask & DEBUG_WAKE_LOCK)
+		pr_info("wake_lock_destroy name=%s\n", lock->name);
+	spin_lock_irqsave(&list_lock, irqflags);
+	lock->flags &= ~WAKE_LOCK_INITIALIZED;
+#ifdef CONFIG_WAKELOCK_STAT
+	if (lock->stat.count) {
+		deleted_wake_locks.stat.count += lock->stat.count;
+		deleted_wake_locks.stat.expire_count += lock->stat.expire_count;
+		deleted_wake_locks.stat.total_time =
+			ktime_add(deleted_wake_locks.stat.total_time,
+				  lock->stat.total_time);
+		deleted_wake_locks.stat.prevent_suspend_time =
+			ktime_add(deleted_wake_locks.stat.prevent_suspend_time,
+				  lock->stat.prevent_suspend_time);
+		deleted_wake_locks.stat.max_time =
+			ktime_add(deleted_wake_locks.stat.max_time,
+				  lock->stat.max_time);
+	}
+#endif
+	list_del(&lock->link);
+	spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_lock_destroy);
+
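+/*
+ * Common path for wake_lock() and wake_lock_timeout().  For suspend locks,
+ * the expire timer is re-armed to fire when the longest-lived timeout lock
+ * runs out; if nothing is left holding off suspend, the suspend work is
+ * queued right away.
+ */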
+static void wake_lock_internal(
+	struct wake_lock *lock, long timeout, int has_timeout)
+{
+	int type;
+	unsigned long irqflags;
+	long expire_in;
+
+	spin_lock_irqsave(&list_lock, irqflags);
+	type = lock->flags & WAKE_LOCK_TYPE_MASK;
+	BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+	BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED));
+#ifdef CONFIG_WAKELOCK_STAT
+	if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) {
+		if (debug_mask & DEBUG_WAKEUP)
+			pr_info("wakeup wake lock: %s\n", lock->name);
+		wait_for_wakeup = 0;
+		lock->stat.wakeup_count++;
+	}
+	if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) &&
+	    (long)(lock->expires - jiffies) <= 0) {
+		wake_unlock_stat_locked(lock, 0);
+		lock->stat.last_time = ktime_get();
+	}
+#endif
+	if (!(lock->flags & WAKE_LOCK_ACTIVE)) {
+		lock->flags |= WAKE_LOCK_ACTIVE;
+#ifdef CONFIG_WAKELOCK_STAT
+		lock->stat.last_time = ktime_get();
+#endif
+	}
+	list_del(&lock->link);
+	if (has_timeout) {
+		if (debug_mask & DEBUG_WAKE_LOCK)
+			pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n",
+				lock->name, type, timeout / HZ,
+				(timeout % HZ) * MSEC_PER_SEC / HZ);
+		lock->expires = jiffies + timeout;
+		lock->flags |= WAKE_LOCK_AUTO_EXPIRE;
+		list_add_tail(&lock->link, &active_wake_locks[type]);
+	} else {
+		if (debug_mask & DEBUG_WAKE_LOCK)
+			pr_info("wake_lock: %s, type %d\n", lock->name, type);
+		lock->expires = LONG_MAX;
+		lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE;
+		list_add(&lock->link, &active_wake_locks[type]);
+	}
+	if (type == WAKE_LOCK_SUSPEND) {
+		current_event_num++;
+#ifdef CONFIG_WAKELOCK_STAT
+		if (lock == &main_wake_lock)
+			update_sleep_wait_stats_locked(1);
+		else if (!wake_lock_active(&main_wake_lock))
+			update_sleep_wait_stats_locked(0);
+#endif
+		if (has_timeout)
+			expire_in = has_wake_lock_locked(type);
+		else
+			expire_in = -1;
+		if (expire_in > 0) {
+			if (debug_mask & DEBUG_EXPIRE)
+				pr_info("wake_lock: %s, start expire timer, "
+					"%ld\n", lock->name, expire_in);
+			mod_timer(&expire_timer, jiffies + expire_in);
+		} else {
+			if (del_timer(&expire_timer))
+				if (debug_mask & DEBUG_EXPIRE)
+					pr_info("wake_lock: %s, stop expire timer\n",
+						lock->name);
+			if (expire_in == 0)
+				queue_work(suspend_work_queue, &suspend_work);
+		}
+	}
+	spin_unlock_irqrestore(&list_lock, irqflags);
+}
+
+void wake_lock(struct wake_lock *lock)
+{
+	wake_lock_internal(lock, 0, 0);
+}
+EXPORT_SYMBOL(wake_lock);
+
+void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+	wake_lock_internal(lock, timeout, 1);
+}
+EXPORT_SYMBOL(wake_lock_timeout);
+
+void wake_unlock(struct wake_lock *lock)
+{
+	int type;
+	unsigned long irqflags;
+	spin_lock_irqsave(&list_lock, irqflags);
+	type = lock->flags & WAKE_LOCK_TYPE_MASK;
+#ifdef CONFIG_WAKELOCK_STAT
+	wake_unlock_stat_locked(lock, 0);
+#endif
+	if (debug_mask & DEBUG_WAKE_LOCK)
+		pr_info("wake_unlock: %s\n", lock->name);
+	lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
+	list_del(&lock->link);
+	list_add(&lock->link, &inactive_locks);
+	if (type == WAKE_LOCK_SUSPEND) {
+		long has_lock = has_wake_lock_locked(type);
+		if (has_lock > 0) {
+			if (debug_mask & DEBUG_EXPIRE)
+				pr_info("wake_unlock: %s, start expire timer, "
+					"%ld\n", lock->name, has_lock);
+			mod_timer(&expire_timer, jiffies + has_lock);
+		} else {
+			if (del_timer(&expire_timer))
+				if (debug_mask & DEBUG_EXPIRE)
+					pr_info("wake_unlock: %s, stop expire "
+						"timer\n", lock->name);
+			if (has_lock == 0)
+				queue_work(suspend_work_queue, &suspend_work);
+		}
+		if (lock == &main_wake_lock) {
+			if (debug_mask & DEBUG_SUSPEND)
+				print_active_locks(WAKE_LOCK_SUSPEND);
+#ifdef CONFIG_WAKELOCK_STAT
+			update_sleep_wait_stats_locked(0);
+#endif
+		}
+	}
+	spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_unlock);
+
+int wake_lock_active(struct wake_lock *lock)
+{
+	return !!(lock->flags & WAKE_LOCK_ACTIVE);
+}
+EXPORT_SYMBOL(wake_lock_active);
+
+static int wakelock_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wakelock_stats_show, NULL);
+}
+
+static const struct file_operations wakelock_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = wakelock_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init wakelocks_init(void)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++)
+		INIT_LIST_HEAD(&active_wake_locks[i]);
+
+#ifdef CONFIG_WAKELOCK_STAT
+	wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND,
+			"deleted_wake_locks");
+#endif
+	wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main");
+	wake_lock(&main_wake_lock);
+	wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups");
+
+	ret = platform_device_register(&power_device);
+	if (ret) {
+		pr_err("wakelocks_init: platform_device_register failed\n");
+		goto err_platform_device_register;
+	}
+	ret = platform_driver_register(&power_driver);
+	if (ret) {
+		pr_err("wakelocks_init: platform_driver_register failed\n");
+		goto err_platform_driver_register;
+	}
+
+	suspend_work_queue = create_singlethread_workqueue("suspend");
+	if (suspend_work_queue == NULL) {
+		ret = -ENOMEM;
+		goto err_suspend_work_queue;
+	}
+
+#ifdef CONFIG_WAKELOCK_STAT
+	proc_create("wakelocks", S_IRUGO, NULL, &wakelock_stats_fops);
+#endif
+
+	return 0;
+
+err_suspend_work_queue:
+	platform_driver_unregister(&power_driver);
+err_platform_driver_register:
+	platform_device_unregister(&power_device);
+err_platform_device_register:
+	wake_lock_destroy(&unknown_wakeup);
+	wake_lock_destroy(&main_wake_lock);
+#ifdef CONFIG_WAKELOCK_STAT
+	wake_lock_destroy(&deleted_wake_locks);
+#endif
+	return ret;
+}
+
+static void  __exit wakelocks_exit(void)
+{
+#ifdef CONFIG_WAKELOCK_STAT
+	remove_proc_entry("wakelocks", NULL);
+#endif
+	destroy_workqueue(suspend_work_queue);
+	platform_driver_unregister(&power_driver);
+	platform_device_unregister(&power_device);
+	wake_lock_destroy(&unknown_wakeup);
+	wake_lock_destroy(&main_wake_lock);
+#ifdef CONFIG_WAKELOCK_STAT
+	wake_lock_destroy(&deleted_wake_locks);
+#endif
+}
+
+core_initcall(wakelocks_init);
+module_exit(wakelocks_exit);
diff --git a/kernel/printk.c b/kernel/printk.c
index 3518539..4835df7 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -53,6 +53,10 @@
 
 #define __LOG_BUF_LEN	(1 << CONFIG_LOG_BUF_SHIFT)
 
+#ifdef        CONFIG_DEBUG_LL
+extern void printascii(char *);
+#endif
+
 /* printk's without a loglevel use this.. */
 #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
 
@@ -290,6 +294,53 @@
 }
 #endif
 
+/*
+ * Return the number of unread characters in the log buffer.
+ */
+static int log_buf_get_len(void)
+{
+	return logged_chars;
+}
+
+/*
+ * Clears the ring-buffer
+ */
+void log_buf_clear(void)
+{
+	logged_chars = 0;
+}
+
+/*
+ * Copy a range of characters from the log buffer.
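+ *
+ * @dest: destination buffer (must hold at least @len bytes)
+ * @idx:  offset into the unread part of the log, 0 being the oldest
+ * @len:  number of characters requested
+ *
+ * Returns the number of characters copied (possibly fewer than requested)
+ * or -1 if @idx is out of range.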
+ */
+int log_buf_copy(char *dest, int idx, int len)
+{
+	int ret, max;
+	bool took_lock = false;
+
+	if (!oops_in_progress) {
+		spin_lock_irq(&logbuf_lock);
+		took_lock = true;
+	}
+
+	max = log_buf_get_len();
+	if (idx < 0 || idx >= max) {
+		ret = -1;
+	} else {
+		if (len > max - idx)
+			len = max - idx;
+		ret = len;
+		idx += (log_end - max);
+		while (len-- > 0)
+			dest[len] = LOG_BUF(idx + len);
+	}
+
+	if (took_lock)
+		spin_unlock_irq(&logbuf_lock);
+
+	return ret;
+}
+
 #ifdef CONFIG_SECURITY_DMESG_RESTRICT
 int dmesg_restrict = 1;
 #else
@@ -872,6 +923,10 @@
 	printed_len += vscnprintf(printk_buf + printed_len,
 				  sizeof(printk_buf) - printed_len, fmt, args);
 
+#ifdef	CONFIG_DEBUG_LL
+	printascii(printk_buf);
+#endif
+
 	p = printk_buf;
 
 	/* Read log level and handle special printk prefix */
@@ -1146,7 +1201,6 @@
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_DEAD:
-	case CPU_DYING:
 	case CPU_DOWN_FAILED:
 	case CPU_UP_CANCELED:
 		console_lock();
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index ab44911..255e166 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -890,7 +890,7 @@
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
-	plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
+	plist_head_init(&lock->wait_list);
 
 	debug_rt_mutex_init(lock, name);
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 9769c75..05735d3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
+#include <linux/cpuacct.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -7781,7 +7782,7 @@
 #ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
-	plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
+	plist_head_init(&rt_rq->pushable_tasks);
 #endif
 
 	rt_rq->rt_time = 0;
@@ -7986,7 +7987,7 @@
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
+	plist_head_init(&init_task.pi_waiters);
 #endif
 
 	/*
@@ -8037,13 +8038,24 @@
 	return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
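+/*
+ * Once early initcalls have run, the scheduler is initialized enough for
+ * might_sleep() checks to be meaningful, so enable them from that point
+ * even though system_state is still SYSTEM_BOOTING.
+ */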
+int __init __might_sleep_init(void)
+{
+	__might_sleep_init_called = 1;
+	return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
 #ifdef in_atomic
 	static unsigned long prev_jiffy;	/* ratelimiting */
 
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
-	    system_state != SYSTEM_RUNNING || oops_in_progress)
+	    oops_in_progress)
+		return;
+	if (system_state != SYSTEM_RUNNING &&
+	    (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
 		return;
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
@@ -8789,6 +8801,15 @@
 static int
 cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
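+	/*
+	 * Only allow moving another task's cpu cgroup if the caller has
+	 * CAP_SYS_NICE or its euid matches the target task's uid or suid.
+	 */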
+	if ((current != tsk) && (!capable(CAP_SYS_NICE))) {
+		const struct cred *cred = current_cred(), *tcred;
+
+		tcred = __task_cred(tsk);
+
+		if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+			return -EPERM;
+	}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
 		return -EINVAL;
@@ -8917,8 +8938,30 @@
 	u64 __percpu *cpuusage;
 	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
 	struct cpuacct *parent;
+	struct cpuacct_charge_calls *cpufreq_fn;
+	void *cpuacct_data;
 };
 
+static struct cpuacct *cpuacct_root;
+
+/* Default calls for cpufreq accounting */
+static struct cpuacct_charge_calls *cpuacct_cpufreq;
+int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn)
+{
+	cpuacct_cpufreq = fn;
+
+	/*
+	 * The root node is created before the platform can register
+	 * callbacks, so initialize it here.
+	 */
+	if (cpuacct_root && fn) {
+		cpuacct_root->cpufreq_fn = fn;
+		if (fn->init)
+			fn->init(&cpuacct_root->cpuacct_data);
+	}
+	return 0;
+}
+
 struct cgroup_subsys cpuacct_subsys;
 
 /* return cpu accounting group corresponding to this container */
@@ -8953,8 +8996,16 @@
 		if (percpu_counter_init(&ca->cpustat[i], 0))
 			goto out_free_counters;
 
+	ca->cpufreq_fn = cpuacct_cpufreq;
+
+	/* If available, have platform code initialize the cpu frequency table */
+	if (ca->cpufreq_fn && ca->cpufreq_fn->init)
+		ca->cpufreq_fn->init(&ca->cpuacct_data);
+
 	if (cgrp->parent)
 		ca->parent = cgroup_ca(cgrp->parent);
+	else
+		cpuacct_root = ca;
 
 	return &ca->css;
 
@@ -9082,6 +9133,32 @@
 	return 0;
 }
 
+static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft,
+		struct cgroup_map_cb *cb)
+{
+	struct cpuacct *ca = cgroup_ca(cgrp);
+	if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show)
+		ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb);
+
+	return 0;
+}
+
+/* return total cpu power usage (milliWatt second) of a group */
+static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	int i;
+	struct cpuacct *ca = cgroup_ca(cgrp);
+	u64 totalpower = 0;
+
+	if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage)
+		for_each_present_cpu(i) {
+			totalpower += ca->cpufreq_fn->power_usage(
+					ca->cpuacct_data);
+		}
+
+	return totalpower;
+}
+
 static struct cftype files[] = {
 	{
 		.name = "usage",
@@ -9096,6 +9173,14 @@
 		.name = "stat",
 		.read_map = cpuacct_stats_show,
 	},
+	{
+		.name =  "cpufreq",
+		.read_map = cpuacct_cpufreq_show,
+	},
+	{
+		.name = "power",
+		.read_u64 = cpuacct_powerusage_read
+	},
 };
 
 static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@ -9125,6 +9210,10 @@
 	for (; ca; ca = ca->parent) {
 		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
+
+		/* Call back into platform code to account for CPU speeds */
+		if (ca->cpufreq_fn && ca->cpufreq_fn->charge)
+			ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu);
 	}
 
 	rcu_read_unlock();
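
For the new cpuacct.cpufreq and cpuacct.power files to report anything, platform code has to register a set of callbacks. A hedged sketch of that registration, with hypothetical helper names; only the field names and cpuacct_register_cpufreq() are taken from the hunks above, and the exact callback signatures are assumed to follow include/linux/cpuacct.h in this tree:

	/* hypothetical platform helpers; signatures per include/linux/cpuacct.h */
	static struct cpuacct_charge_calls example_cpuacct_calls = {
		.init		= example_cpuacct_init,		/* set up per-group data */
		.charge		= example_cpuacct_charge,	/* account cputime at the current freq */
		.cpufreq_show	= example_cpuacct_show,		/* fill the cpuacct.cpufreq map */
		.power_usage	= example_power_usage,		/* total mW*s for cpuacct.power */
	};

	static int __init example_cpuacct_hooks_init(void)
	{
		return cpuacct_register_cpufreq(&example_cpuacct_calls);
	}
	late_initcall(example_cpuacct_hooks_init);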
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f175d98..ea468b1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -96,6 +96,7 @@
 extern unsigned int core_pipe_limit;
 extern int pid_max;
 extern int min_free_kbytes;
+extern int min_free_order_shift;
 extern int pid_max_min, pid_max_max;
 extern int sysctl_drop_caches;
 extern int percpu_pagelist_fraction;
@@ -1189,6 +1190,13 @@
 		.extra1		= &zero,
 	},
 	{
+		.procname	= "min_free_order_shift",
+		.data		= &min_free_order_shift,
+		.maxlen		= sizeof(min_free_order_shift),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec
+	},
+	{
 		.procname	= "percpu_pagelist_fraction",
 		.data		= &percpu_pagelist_fraction,
 		.maxlen		= sizeof(percpu_pagelist_fraction),
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index e2fd74b..cae2ad7 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,5 +1,5 @@
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
-obj-y += timeconv.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o posix-clock.o #alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)		+= clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS)		+= tick-common.o
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd373c8..c6b006a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -667,8 +667,9 @@
 	  mutexes and rwsems.
 
 config STACKTRACE
-	bool
+	bool "Stacktrace"
 	depends on STACKTRACE_SUPPORT
+	default y
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
diff --git a/lib/plist.c b/lib/plist.c
index 0ae7e64..a0a4da4 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -56,11 +56,6 @@
 
 static void plist_check_head(struct plist_head *head)
 {
-	WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
-	if (head->rawlock)
-		WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
-	if (head->spinlock)
-		WARN_ON_SMP(!spin_is_locked(head->spinlock));
 	if (!plist_head_empty(head))
 		plist_check_list(&plist_first(head)->prio_list);
 	plist_check_list(&head->node_list);
@@ -180,7 +175,7 @@
 	unsigned int r = local_clock();
 
 	printk(KERN_INFO "start plist test\n");
-	plist_head_init(&test_head, NULL);
+	plist_head_init(&test_head);
 	for (i = 0; i < ARRAY_SIZE(test_node); i++)
 		plist_node_init(test_node + i, 0);
 
diff --git a/mm/Makefile b/mm/Makefile
index 836e416..2d00bf5 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -30,6 +30,7 @@
 obj-$(CONFIG_NUMA) 	+= mempolicy.o
 obj-$(CONFIG_SPARSEMEM)	+= sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
+obj-$(CONFIG_ASHMEM) += ashmem.o
 obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
diff --git a/mm/ashmem.c b/mm/ashmem.c
new file mode 100644
index 0000000..66e3f23
--- /dev/null
+++ b/mm/ashmem.c
@@ -0,0 +1,748 @@
+/* mm/ashmem.c
+**
+** Anonymous Shared Memory Subsystem, ashmem
+**
+** Copyright (C) 2008 Google, Inc.
+**
+** Robert Love <rlove@google.com>
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+** GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include <linux/ashmem.h>
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/*
+ * ashmem_area - anonymous shared memory area
+ * Lifecycle: From our parent file's open() until its release()
+ * Locking: Protected by `ashmem_mutex'
+ * Big Note: Mappings do NOT pin this structure; it dies on close()
+ */
+struct ashmem_area {
+	char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */
+	struct list_head unpinned_list;	/* list of all ashmem areas */
+	struct file *file;		/* the shmem-based backing file */
+	size_t size;			/* size of the mapping, in bytes */
+	unsigned long prot_mask;	/* allowed prot bits, as vm_flags */
+};
+
+/*
+ * ashmem_range - represents an interval of unpinned (evictable) pages
+ * Lifecycle: From unpin to pin
+ * Locking: Protected by `ashmem_mutex'
+ */
+struct ashmem_range {
+	struct list_head lru;		/* entry in LRU list */
+	struct list_head unpinned;	/* entry in its area's unpinned list */
+	struct ashmem_area *asma;	/* associated area */
+	size_t pgstart;			/* starting page, inclusive */
+	size_t pgend;			/* ending page, inclusive */
+	unsigned int purged;		/* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+/* Count of pages on our LRU list, protected by ashmem_mutex */
+static unsigned long lru_count;
+
+/*
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
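+/*
+ * The helpers below treat an ashmem_range as a closed interval of page
+ * indexes [pgstart, pgend]; "page" arguments are page indexes within the
+ * area, not struct page pointers.
+ */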
+#define range_size(range) \
+  ((range)->pgend - (range)->pgstart + 1)
+
+#define range_on_lru(range) \
+  ((range)->purged == ASHMEM_NOT_PURGED)
+
+#define page_range_subsumes_range(range, start, end) \
+  (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+
+#define page_range_subsumed_by_range(range, start, end) \
+  (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+
+#define page_in_range(range, page) \
+ (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+
+#define page_range_in_range(range, start, end) \
+  (page_in_range(range, start) || page_in_range(range, end) || \
+   page_range_subsumes_range(range, start, end))
+
+#define range_before_page(range, page) \
+  ((range)->pgend < (page))
+
+#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
+
+static inline void lru_add(struct ashmem_range *range)
+{
+	list_add_tail(&range->lru, &ashmem_lru_list);
+	lru_count += range_size(range);
+}
+
+static inline void lru_del(struct ashmem_range *range)
+{
+	list_del(&range->lru);
+	lru_count -= range_size(range);
+}
+
+/*
+ * range_alloc - allocate and initialize a new ashmem_range structure
+ *
+ * 'asma' - associated ashmem_area
+ * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
+ * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * 'start' - starting page, inclusive
+ * 'end' - ending page, inclusive
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int range_alloc(struct ashmem_area *asma,
+		       struct ashmem_range *prev_range, unsigned int purged,
+		       size_t start, size_t end)
+{
+	struct ashmem_range *range;
+
+	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+	if (unlikely(!range))
+		return -ENOMEM;
+
+	range->asma = asma;
+	range->pgstart = start;
+	range->pgend = end;
+	range->purged = purged;
+
+	list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+	if (range_on_lru(range))
+		lru_add(range);
+
+	return 0;
+}
+
+static void range_del(struct ashmem_range *range)
+{
+	list_del(&range->unpinned);
+	if (range_on_lru(range))
+		lru_del(range);
+	kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/*
+ * range_shrink - shrinks a range
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+				size_t start, size_t end)
+{
+	size_t pre = range_size(range);
+
+	range->pgstart = start;
+	range->pgend = end;
+
+	if (range_on_lru(range))
+		lru_count -= pre - range_size(range);
+}
+
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+	struct ashmem_area *asma;
+	int ret;
+
+	ret = generic_file_open(inode, file);
+	if (unlikely(ret))
+		return ret;
+
+	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+	if (unlikely(!asma))
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&asma->unpinned_list);
+	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+	asma->prot_mask = PROT_MASK;
+	file->private_data = asma;
+
+	return 0;
+}
+
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+	struct ashmem_area *asma = file->private_data;
+	struct ashmem_range *range, *next;
+
+	mutex_lock(&ashmem_mutex);
+	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+		range_del(range);
+	mutex_unlock(&ashmem_mutex);
+
+	if (asma->file)
+		fput(asma->file);
+	kmem_cache_free(ashmem_area_cachep, asma);
+
+	return 0;
+}
+
+static ssize_t ashmem_read(struct file *file, char __user *buf,
+			   size_t len, loff_t *pos)
+{
+	struct ashmem_area *asma = file->private_data;
+	int ret = 0;
+
+	mutex_lock(&ashmem_mutex);
+
+	/* If size is not set, or set to 0, always return EOF. */
+	if (asma->size == 0)
+		goto out;
+
+	if (!asma->file) {
+		ret = -EBADF;
+		goto out;
+	}
+
+	ret = asma->file->f_op->read(asma->file, buf, len, pos);
+	if (ret < 0)
+		goto out;
+
+	/* Update backing file pos, since f_op->read() doesn't */
+	asma->file->f_pos = *pos;
+
+out:
+	mutex_unlock(&ashmem_mutex);
+	return ret;
+}
+
+static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+{
+	struct ashmem_area *asma = file->private_data;
+	int ret;
+
+	mutex_lock(&ashmem_mutex);
+
+	if (asma->size == 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!asma->file) {
+		ret = -EBADF;
+		goto out;
+	}
+
+	ret = asma->file->f_op->llseek(asma->file, offset, origin);
+	if (ret < 0)
+		goto out;
+
+	/* Copy f_pos from the backing file, since f_op->llseek() sets it */
+	file->f_pos = asma->file->f_pos;
+
+out:
+	mutex_unlock(&ashmem_mutex);
+	return ret;
+}
+
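+/*
+ * Translate PROT_* bits into their VM_MAY* counterparts (the analogue of
+ * calc_vm_prot_bits() for the "may" flags), so that ashmem_mmap() can clear
+ * the VM_MAY* bits for protections outside the area's prot_mask.
+ */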
+static inline unsigned long
+calc_vm_may_flags(unsigned long prot)
+{
+	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD ) |
+	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
+	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
+}
+
+static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct ashmem_area *asma = file->private_data;
+	int ret = 0;
+
+	mutex_lock(&ashmem_mutex);
+
+	/* user needs to SET_SIZE before mapping */
+	if (unlikely(!asma->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* requested protection bits must match our allowed protection mask */
+	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
+						calc_vm_prot_bits(PROT_MASK))) {
+		ret = -EPERM;
+		goto out;
+	}
+	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
+
+	if (!asma->file) {
+		char *name = ASHMEM_NAME_DEF;
+		struct file *vmfile;
+
+		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+			name = asma->name;
+
+		/* ... and allocate the backing shmem file */
+		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
+		if (unlikely(IS_ERR(vmfile))) {
+			ret = PTR_ERR(vmfile);
+			goto out;
+		}
+		asma->file = vmfile;
+	}
+	get_file(asma->file);
+
+	if (vma->vm_flags & VM_SHARED)
+		shmem_set_file(vma, asma->file);
+	else {
+		if (vma->vm_file)
+			fput(vma->vm_file);
+		vma->vm_file = asma->file;
+	}
+	vma->vm_flags |= VM_CAN_NONLINEAR;
+
+out:
+	mutex_unlock(&ashmem_mutex);
+	return ret;
+}
+
+/*
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ *
+ * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
+ * many objects (pages) we have in total.
+ *
+ * 'gfp_mask' is the mask of the allocation that got us into this mess.
+ *
+ * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * proceed without risk of deadlock (due to gfp_mask).
+ *
+ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
+ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
+ * pages freed.
+ */
+static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+	struct ashmem_range *range, *next;
+
+	/* We might recurse into filesystem code, so bail out if necessary */
+	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+		return -1;
+	if (!sc->nr_to_scan)
+		return lru_count;
+
+	mutex_lock(&ashmem_mutex);
+	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+		struct inode *inode = range->asma->file->f_dentry->d_inode;
+		loff_t start = range->pgstart * PAGE_SIZE;
+		loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+
+		vmtruncate_range(inode, start, end);
+		range->purged = ASHMEM_WAS_PURGED;
+		lru_del(range);
+
+		sc->nr_to_scan -= range_size(range);
+		if (sc->nr_to_scan <= 0)
+			break;
+	}
+	mutex_unlock(&ashmem_mutex);
+
+	return lru_count;
+}
+
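+/*
+ * 'seeks' is four times the default, which biases the VM toward reclaiming
+ * ashmem ranges less aggressively than ordinary caches (a rough tuning
+ * choice; shrink_slab() scales the scan count inversely with seeks).
+ */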
+static struct shrinker ashmem_shrinker = {
+	.shrink = ashmem_shrink,
+	.seeks = DEFAULT_SEEKS * 4,
+};
+
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+	int ret = 0;
+
+	mutex_lock(&ashmem_mutex);
+
+	/* the user can only remove, not add, protection bits */
+	if (unlikely((asma->prot_mask & prot) != prot)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* does the application expect PROT_READ to imply PROT_EXEC? */
+	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+		prot |= PROT_EXEC;
+
+	asma->prot_mask = prot;
+
+out:
+	mutex_unlock(&ashmem_mutex);
+	return ret;
+}
+
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+	int ret = 0;
+
+	mutex_lock(&ashmem_mutex);
+
+	/* cannot change an existing mapping's name */
+	if (unlikely(asma->file)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
+				    name, ASHMEM_NAME_LEN)))
+		ret = -EFAULT;
+	asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
+
+out:
+	mutex_unlock(&ashmem_mutex);
+
+	return ret;
+}
+
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+	int ret = 0;
+
+	mutex_lock(&ashmem_mutex);
+	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+		size_t len;
+
+		/*
+		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+		 * prevents us from revealing one user's stack to another.
+		 */
+		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
+		if (unlikely(copy_to_user(name,
+				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
+			ret = -EFAULT;
+	} else {
+		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
+					  sizeof(ASHMEM_NAME_DEF))))
+			ret = -EFAULT;
+	}
+	mutex_unlock(&ashmem_mutex);
+
+	return ret;
+}
+
+/*
+ * ashmem_pin - pin the given ashmem region, returning whether it was
+ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+	struct ashmem_range *range, *next;
+	int ret = ASHMEM_NOT_PURGED;
+
+	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+		/* moved past last applicable page; we can short circuit */
+		if (range_before_page(range, pgstart))
+			break;
+
+		/*
+		 * The user can ask us to pin pages that span multiple ranges,
+		 * or to pin pages that aren't even unpinned, so this is messy.
+		 *
+		 * Four cases:
+		 * 1. The requested range subsumes an existing range, so we
+		 *    just remove the entire matching range.
+		 * 2. The requested range overlaps the start of an existing
+		 *    range, so we just update that range.
+		 * 3. The requested range overlaps the end of an existing
+		 *    range, so we just update that range.
+		 * 4. The requested range punches a hole in an existing range,
+		 *    so we have to update one side of the range and then
+		 *    create a new range for the other side.
+		 */
+		if (page_range_in_range(range, pgstart, pgend)) {
+			ret |= range->purged;
+
+			/* Case #1: Easy. Just nuke the whole thing. */
+			if (page_range_subsumes_range(range, pgstart, pgend)) {
+				range_del(range);
+				continue;
+			}
+
+			/* Case #2: We overlap from the start, so adjust it */
+			if (range->pgstart >= pgstart) {
+				range_shrink(range, pgend + 1, range->pgend);
+				continue;
+			}
+
+			/* Case #3: We overlap from the rear, so adjust it */
+			if (range->pgend <= pgend) {
+				range_shrink(range, range->pgstart, pgstart-1);
+				continue;
+			}
+
+			/*
+			 * Case #4: We eat a chunk out of the middle. A bit
+			 * more complicated, we allocate a new range for the
+			 * second half and adjust the first chunk's endpoint.
+			 */
+			range_alloc(asma, range, range->purged,
+				    pgend + 1, range->pgend);
+			range_shrink(range, range->pgstart, pgstart - 1);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * ashmem_unpin - unpin the given range of pages. Returns zero on success.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+	struct ashmem_range *range, *next;
+	unsigned int purged = ASHMEM_NOT_PURGED;
+
+restart:
+	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+		/* short circuit: this is our insertion point */
+		if (range_before_page(range, pgstart))
+			break;
+
+		/*
+		 * The user can ask us to unpin pages that are already entirely
+		 * or partially pinned. We handle those two cases here.
+		 */
+		if (page_range_subsumed_by_range(range, pgstart, pgend))
+			return 0;
+		if (page_range_in_range(range, pgstart, pgend)) {
+			pgstart = min_t(size_t, range->pgstart, pgstart);
+			pgend = max_t(size_t, range->pgend, pgend);
+			purged |= range->purged;
+			range_del(range);
+			goto restart;
+		}
+	}
+
+	return range_alloc(asma, range, purged, pgstart, pgend);
+}
+
+/*
+ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
+ * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
+				 size_t pgend)
+{
+	struct ashmem_range *range;
+	int ret = ASHMEM_IS_PINNED;
+
+	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
+		if (range_before_page(range, pgstart))
+			break;
+		if (page_range_in_range(range, pgstart, pgend)) {
+			ret = ASHMEM_IS_UNPINNED;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+			    void __user *p)
+{
+	struct ashmem_pin pin;
+	size_t pgstart, pgend;
+	int ret = -EINVAL;
+
+	if (unlikely(!asma->file))
+		return -EINVAL;
+
+	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+		return -EFAULT;
+
+	/* per custom, you can pass zero for len to mean "everything onward" */
+	if (!pin.len)
+		pin.len = PAGE_ALIGN(asma->size) - pin.offset;
+
+	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+		return -EINVAL;
+
+	if (unlikely(((__u32) -1) - pin.offset < pin.len))
+		return -EINVAL;
+
+	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+		return -EINVAL;
+
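+	/*
+	 * Convert the byte interval into an inclusive page interval, e.g.
+	 * offset 0 with a len of three pages covers pages [0, 2].
+	 */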
+	pgstart = pin.offset / PAGE_SIZE;
+	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
+
+	mutex_lock(&ashmem_mutex);
+
+	switch (cmd) {
+	case ASHMEM_PIN:
+		ret = ashmem_pin(asma, pgstart, pgend);
+		break;
+	case ASHMEM_UNPIN:
+		ret = ashmem_unpin(asma, pgstart, pgend);
+		break;
+	case ASHMEM_GET_PIN_STATUS:
+		ret = ashmem_get_pin_status(asma, pgstart, pgend);
+		break;
+	}
+
+	mutex_unlock(&ashmem_mutex);
+
+	return ret;
+}
+
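+/*
+ * Rough sketch of the expected userspace sequence (illustrative only, not
+ * part of this driver): open /dev/ashmem, issue ASHMEM_SET_NAME and
+ * ASHMEM_SET_SIZE while the region is still unmapped, mmap() the fd, then
+ * ASHMEM_UNPIN ranges that may be discarded under memory pressure and
+ * ASHMEM_PIN them again before reuse, checking for ASHMEM_WAS_PURGED.
+ */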
+static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct ashmem_area *asma = file->private_data;
+	long ret = -ENOTTY;
+
+	switch (cmd) {
+	case ASHMEM_SET_NAME:
+		ret = set_name(asma, (void __user *) arg);
+		break;
+	case ASHMEM_GET_NAME:
+		ret = get_name(asma, (void __user *) arg);
+		break;
+	case ASHMEM_SET_SIZE:
+		ret = -EINVAL;
+		if (!asma->file) {
+			ret = 0;
+			asma->size = (size_t) arg;
+		}
+		break;
+	case ASHMEM_GET_SIZE:
+		ret = asma->size;
+		break;
+	case ASHMEM_SET_PROT_MASK:
+		ret = set_prot_mask(asma, arg);
+		break;
+	case ASHMEM_GET_PROT_MASK:
+		ret = asma->prot_mask;
+		break;
+	case ASHMEM_PIN:
+	case ASHMEM_UNPIN:
+	case ASHMEM_GET_PIN_STATUS:
+		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
+		break;
+	case ASHMEM_PURGE_ALL_CACHES:
+		ret = -EPERM;
+		if (capable(CAP_SYS_ADMIN)) {
+			struct shrink_control sc = {
+				.gfp_mask = GFP_KERNEL,
+				.nr_to_scan = 0,
+			};
+			ret = ashmem_shrink(&ashmem_shrinker, &sc);
+			sc.nr_to_scan = ret;
+			ashmem_shrink(&ashmem_shrinker, &sc);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static struct file_operations ashmem_fops = {
+	.owner = THIS_MODULE,
+	.open = ashmem_open,
+	.release = ashmem_release,
+	.read = ashmem_read,
+	.llseek = ashmem_llseek,
+	.mmap = ashmem_mmap,
+	.unlocked_ioctl = ashmem_ioctl,
+	.compat_ioctl = ashmem_ioctl,
+};
+
+static struct miscdevice ashmem_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "ashmem",
+	.fops = &ashmem_fops,
+};
+
+static int __init ashmem_init(void)
+{
+	int ret;
+
+	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+					  sizeof(struct ashmem_area),
+					  0, 0, NULL);
+	if (unlikely(!ashmem_area_cachep)) {
+		printk(KERN_ERR "ashmem: failed to create slab cache\n");
+		return -ENOMEM;
+	}
+
+	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+					  sizeof(struct ashmem_range),
+					  0, 0, NULL);
+	if (unlikely(!ashmem_range_cachep)) {
+		printk(KERN_ERR "ashmem: failed to create slab cache\n");
+		ret = -ENOMEM;
+		goto out_free_area;
+	}
+
+	ret = misc_register(&ashmem_misc);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "ashmem: failed to register misc device!\n");
+		goto out_free_range;
+	}
+
+	register_shrinker(&ashmem_shrinker);
+
+	printk(KERN_INFO "ashmem: initialized\n");
+
+	return 0;
+
+out_free_range:
+	kmem_cache_destroy(ashmem_range_cachep);
+out_free_area:
+	kmem_cache_destroy(ashmem_area_cachep);
+	return ret;
+}
+
+static void __exit ashmem_exit(void)
+{
+	int ret;
+
+	unregister_shrinker(&ashmem_shrinker);
+
+	ret = misc_deregister(&ashmem_misc);
+	if (unlikely(ret))
+		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");
+
+	kmem_cache_destroy(ashmem_range_cachep);
+	kmem_cache_destroy(ashmem_area_cachep);
+
+	printk(KERN_INFO "ashmem: unloaded\n");
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e8985a..fef8dc3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -176,6 +176,7 @@
 };
 
 int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1487,7 +1488,7 @@
 		free_pages -= z->free_area[o].nr_free << o;
 
 		/* Require fewer higher order pages to be free */
-		min >>= 1;
+		min >>= min_free_order_shift;
 
 		if (free_pages <= min)
 			return false;
diff --git a/mm/shmem.c b/mm/shmem.c
index fcedf54..883e98f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3015,6 +3015,15 @@
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	vma->vm_file = file;
+	vma->vm_ops = &shmem_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3028,11 +3037,7 @@
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = file;
-	vma->vm_ops = &shmem_vm_ops;
-	vma->vm_flags |= VM_CAN_NONLINEAR;
+	shmem_set_file(vma, file);
 	return 0;
 }
 
diff --git a/net/Kconfig b/net/Kconfig
index 878151c..919cf9a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -79,6 +79,20 @@
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+	bool "Only allow certain groups to create sockets"
+	default y
+	help
+	  Restrict the creation of certain socket families to processes that
+	  are root or belong to the matching Android group (for Bluetooth
+	  sockets, AID_NET_BT / AID_NET_BT_ADMIN; see the checks added in
+	  net/bluetooth/af_bluetooth.c).
+
+config NET_ACTIVITY_STATS
+	bool "Network activity statistics tracking"
+	default y
+	help
+	  Network activity statistics are useful for tracking wireless modem
+	  activity on 2G, 3G and 4G networks. Counts the number of
+	  transmissions and groups them into power-of-two time buckets.
+
 config NETWORK_SECMARK
 	bool "Security Marking"
 	help
@@ -217,7 +231,7 @@
 source "net/batman-adv/Kconfig"
 
 config RPS
-	boolean
+	boolean "RPS"
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
diff --git a/net/Makefile b/net/Makefile
index a51d946..54808ab 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -68,3 +68,4 @@
 obj-$(CONFIG_DNS_RESOLVER)	+= dns_resolver/
 obj-$(CONFIG_CEPH_LIB)		+= ceph/
 obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
+obj-$(CONFIG_NET_ACTIVITY_STATS)		+= activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 0000000..8a3e934
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,115 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in buckets (power of 2).
+ * 1,2,4,8...512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
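+
+/*
+ * Example (illustrative): a transmission observed 5 seconds after the
+ * previous one falls into bucket 2, since 4s (1 << 2) is the largest
+ * power-of-two interval not exceeding the 5 second gap. Gaps shorter than
+ * one second do not increment any bucket.
+ */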
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
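+/*
+ * Record one network transmission, bucketing the interval since the last
+ * recorded one. Presumably called from the transmit path by other patches
+ * in this series; there are no callers in this file.
+ */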
+void activity_stats_update(void)
+{
+	int i;
+	unsigned long flags;
+	ktime_t now;
+	s64 delta;
+
+	spin_lock_irqsave(&activity_lock, flags);
+	now = ktime_get();
+	delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+	for (i = BUCKET_MAX - 1; i >= 0; i--) {
+		/*
+		 * Skip buckets whose minimum interval (1 << i seconds)
+		 * exceeds the time since the last recorded transmission.
+		 */
+		if (delta < (1000000000ULL << i))
+			continue;
+
+		activity_stats[i]++;
+		last_transmit = now;
+		break;
+	}
+	spin_unlock_irqrestore(&activity_lock, flags);
+}
+
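+/*
+ * /proc/net/stat/activity output looks roughly like (values illustrative):
+ *
+ *	Min Bucket(sec) Count
+ *	              1 42
+ *	              2 13
+ *	...
+ */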
+static int activity_stats_read_proc(char *page, char **start, off_t off,
+					int count, int *eof, void *data)
+{
+	int i;
+	int len;
+	char *p = page;
+
+	/* Only print on the first read and only if the buffer is big enough */
+	if (off || count < (30 * BUCKET_MAX + 22))
+		return -ENOMEM;
+
+	len = snprintf(p, count, "Min Bucket(sec) Count\n");
+	count -= len;
+	p += len;
+
+	for (i = 0; i < BUCKET_MAX; i++) {
+		len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
+		count -= len;
+		p += len;
+	}
+	*eof = 1;
+
+	return p - page;
+}
+
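+/*
+ * last_transmit comes from the monotonic clock, which does not advance
+ * across suspend. Shift it back by the time spent suspended so that the
+ * suspended interval still counts toward the gap between transmissions.
+ */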
+static int activity_stats_notifier(struct notifier_block *nb,
+					unsigned long event, void *dummy)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		suspend_time = ktime_get_real();
+		break;
+
+	case PM_POST_SUSPEND:
+		suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+		last_transmit = ktime_sub(last_transmit, suspend_time);
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block activity_stats_notifier_block = {
+	.notifier_call = activity_stats_notifier,
+};
+
+static int __init activity_stats_init(void)
+{
+	create_proc_read_entry("activity", S_IRUGO,
+			init_net.proc_net_stat, activity_stats_read_proc, NULL);
+	return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
+
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 6ae5ec5..bfb3dc0 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,6 +6,7 @@
 	tristate "Bluetooth subsystem support"
 	depends on NET && !S390
 	depends on RFKILL || !RFKILL
+	select CRYPTO
 	help
 	  Bluetooth is low-cost, low-power, short-range wireless technology.
 	  It was designed as a replacement for cables and other short-range
@@ -22,6 +23,7 @@
 	     BNEP Module (Bluetooth Network Encapsulation Protocol)
 	     CMTP Module (CAPI Message Transport Protocol)
 	     HIDP Module (Human Interface Device Protocol)
+	     SMP Module (Security Manager Protocol)
 
 	  Say Y here to compile Bluetooth support into the kernel or say M to
 	  compile it as module (bluetooth).
@@ -36,11 +38,18 @@
 config BT_L2CAP
 	bool "L2CAP protocol support"
 	select CRC16
+	select CRYPTO
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ECB
 	help
 	  L2CAP (Logical Link Control and Adaptation Protocol) provides
 	  connection oriented and connection-less data transport.  L2CAP
 	  support is required for most Bluetooth applications.
 
+	  Also included is support for SMP (Security Manager Protocol) which
+	  is the security layer on top of LE (Low Energy) links.
+
 config BT_SCO
 	bool "SCO links support"
 	help
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index f04fe9a..9b67f3d 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,5 +9,5 @@
 obj-$(CONFIG_BT_HIDP)	+= hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
-bluetooth-$(CONFIG_BT_L2CAP)	+= l2cap_core.o l2cap_sock.o
+bluetooth-$(CONFIG_BT_L2CAP)	+= l2cap_core.o l2cap_sock.o smp.o
 bluetooth-$(CONFIG_BT_SCO)	+= sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8add9b4..e87d15d 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,6 +40,15 @@
 
 #include <net/bluetooth/bluetooth.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
+#ifndef CONFIG_BT_SOCK_DEBUG
+#undef  BT_DBG
+#define BT_DBG(D...)
+#endif
+
 #define VERSION "2.16"
 
 /* Bluetooth sockets */
@@ -125,11 +134,40 @@
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+	return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN));
+}
+
+static inline int current_has_bt(void)
+{
+	return (current_has_bt_admin() || in_egroup_p(AID_NET_BT));
+}
+#else
+static inline int current_has_bt_admin(void)
+{
+	return 1;
+}
+
+static inline int current_has_bt(void)
+{
+	return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 			  int kern)
 {
 	int err;
 
+	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+			proto == BTPROTO_L2CAP) {
+		if (!current_has_bt())
+			return -EPERM;
+	} else if (!current_has_bt_admin())
+		return -EPERM;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 744233c..040f67b 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -326,7 +326,7 @@
 {
 	struct capi_ctr *ctrl = &session->ctrl;
 	struct cmtp_application *application;
-	__u16 cmd, appl;
+	__u16 appl;
 	__u32 contr;
 
 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,7 +344,6 @@
 		return;
 	}
 
-	cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
 	appl = CAPIMSG_APPID(skb->data);
 	contr = CAPIMSG_CONTROL(skb->data);
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index d3a05b9..b7ff6e3 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -53,11 +53,13 @@
 	conn->state = BT_CONNECT;
 	conn->out = 1;
 	conn->link_mode |= HCI_LM_MASTER;
+	conn->sec_level = BT_SECURITY_LOW;
 
 	memset(&cp, 0, sizeof(cp));
 	cp.scan_interval = cpu_to_le16(0x0004);
 	cp.scan_window = cpu_to_le16(0x0004);
 	bacpy(&cp.peer_addr, &conn->dst);
+	cp.peer_addr_type = conn->dst_type;
 	cp.conn_interval_min = cpu_to_le16(0x0008);
 	cp.conn_interval_max = cpu_to_le16(0x0100);
 	cp.supervision_timeout = cpu_to_le16(0x0064);
@@ -203,6 +205,55 @@
 }
 EXPORT_SYMBOL(hci_le_conn_update);
 
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
+							__u8 ltk[16])
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_start_enc cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+	cp.ediv = ediv;
+	memcpy(cp.rand, rand, sizeof(cp.rand));
+
+	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_start_enc);
+
+void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_ltk_reply cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_ltk_reply);
+
+void hci_le_ltk_neg_reply(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_ltk_neg_reply cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
+}
+
 /* Device _must_ be locked */
 void hci_sco_setup(struct hci_conn *conn, __u8 status)
 {
@@ -282,7 +333,8 @@
 	hci_dev_unlock(hdev);
 }
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst)
 {
 	struct hci_conn *conn;
 
@@ -310,14 +362,22 @@
 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
 		break;
 	case SCO_LINK:
-		if (lmp_esco_capable(hdev))
-			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
-					(hdev->esco_type & EDR_ESCO_MASK);
-		else
-			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
-		break;
+		if (!pkt_type)
+			pkt_type = SCO_ESCO_MASK;
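+		/* fall through */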
 	case ESCO_LINK:
-		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+		if (!pkt_type)
+			pkt_type = ALL_ESCO_MASK;
+		if (lmp_esco_capable(hdev)) {
+			/* HCI Setup Synchronous Connection Command uses
+			   reverse logic on the EDR_ESCO_MASK bits */
+			conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
+					hdev->esco_type;
+		} else {
+			/* Legacy HCI Add Sco Connection Command uses a
+			   shifted bitmask */
+			conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
+					SCO_PTYPE_MASK;
+		}
 		break;
 	}
 
@@ -438,7 +498,9 @@
 
 /* Create SCO, ACL or LE connection.
  * Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst,
+					__u8 sec_level, __u8 auth_type)
 {
 	struct hci_conn *acl;
 	struct hci_conn *sco;
@@ -447,14 +509,23 @@
 	BT_DBG("%s dst %s", hdev->name, batostr(dst));
 
 	if (type == LE_LINK) {
+		struct adv_entry *entry;
+
 		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
 		if (le)
 			return ERR_PTR(-EBUSY);
-		le = hci_conn_add(hdev, LE_LINK, dst);
+
+		entry = hci_find_adv_entry(hdev, dst);
+		if (!entry)
+			return ERR_PTR(-EHOSTUNREACH);
+
+		le = hci_conn_add(hdev, LE_LINK, 0, dst);
 		if (!le)
 			return ERR_PTR(-ENOMEM);
-		if (le->state == BT_OPEN)
-			hci_le_connect(le);
+
+		le->dst_type = entry->bdaddr_type;
+
+		hci_le_connect(le);
 
 		hci_conn_hold(le);
 
@@ -463,7 +534,7 @@
 
 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 	if (!acl) {
-		acl = hci_conn_add(hdev, ACL_LINK, dst);
+		acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
 		if (!acl)
 			return NULL;
 	}
@@ -482,7 +553,7 @@
 
 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
 	if (!sco) {
-		sco = hci_conn_add(hdev, type, dst);
+		sco = hci_conn_add(hdev, type, pkt_type, dst);
 		if (!sco) {
 			hci_conn_put(acl);
 			return NULL;
@@ -497,7 +568,7 @@
 	if (acl->state == BT_CONNECTED &&
 			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
 		acl->power_save = 1;
-		hci_conn_enter_active_mode(acl);
+		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
 
 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
 			/* defer SCO setup until mode change completed */
@@ -548,6 +619,8 @@
 		cp.handle = cpu_to_le16(conn->handle);
 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
 							sizeof(cp), &cp);
+		if (conn->key_type != 0xff)
+			set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
 	}
 
 	return 0;
@@ -631,9 +704,7 @@
 	if (sec_level != BT_SECURITY_HIGH)
 		return 1; /* Accept if non-secure is required */
 
-	if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
-			(conn->key_type == HCI_LK_COMBINATION &&
-			conn->pin_length == 16))
+	if (conn->sec_level == BT_SECURITY_HIGH)
 		return 1;
 
 	return 0; /* Reject not secure link */
@@ -676,7 +747,7 @@
 EXPORT_SYMBOL(hci_conn_switch_role);
 
 /* Enter active mode */
-void hci_conn_enter_active_mode(struct hci_conn *conn)
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
 {
 	struct hci_dev *hdev = conn->hdev;
 
@@ -685,7 +756,10 @@
 	if (test_bit(HCI_RAW, &hdev->flags))
 		return;
 
-	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
+	if (conn->mode != HCI_CM_SNIFF)
+		goto timer;
+
+	if (!conn->power_save && !force_active)
 		goto timer;
 
 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
@@ -826,6 +900,15 @@
 		(ci + n)->out   = c->out;
 		(ci + n)->state = c->state;
 		(ci + n)->link_mode = c->link_mode;
+		if (c->type == SCO_LINK) {
+			(ci + n)->mtu = hdev->sco_mtu;
+			(ci + n)->cnt = hdev->sco_cnt;
+			(ci + n)->pkts = hdev->sco_pkts;
+		} else {
+			(ci + n)->mtu = hdev->acl_mtu;
+			(ci + n)->cnt = hdev->acl_cnt;
+			(ci + n)->pkts = hdev->acl_pkts;
+		}
 		if (++n >= req.conn_num)
 			break;
 	}
@@ -862,6 +945,15 @@
 		ci.out   = conn->out;
 		ci.state = conn->state;
 		ci.link_mode = conn->link_mode;
+		if (req.type == SCO_LINK) {
+			ci.mtu = hdev->sco_mtu;
+			ci.cnt = hdev->sco_cnt;
+			ci.pkts = hdev->sco_pkts;
+		} else {
+			ci.mtu = hdev->acl_mtu;
+			ci.cnt = hdev->acl_cnt;
+			ci.pkts = hdev->acl_pkts;
+		}
 	}
 	hci_dev_unlock_bh(hdev);
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 815269b..908fcd3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -42,6 +42,7 @@
 #include <linux/notifier.h>
 #include <linux/rfkill.h>
 #include <linux/timer.h>
+#include <linux/crypto.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -145,7 +146,7 @@
 
 	switch (hdev->req_status) {
 	case HCI_REQ_DONE:
-		err = -bt_err(hdev->req_result);
+		err = -bt_to_errno(hdev->req_result);
 		break;
 
 	case HCI_REQ_CANCELED:
@@ -539,7 +540,7 @@
 		ret = __hci_request(hdev, hci_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
-		if (lmp_le_capable(hdev))
+		if (lmp_host_le_capable(hdev))
 			ret = __hci_request(hdev, hci_le_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
@@ -1056,6 +1057,42 @@
 	return 0;
 }
 
+struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+{
+	struct link_key *k;
+
+	list_for_each_entry(k, &hdev->link_keys, list) {
+		struct key_master_id *id;
+
+		if (k->type != HCI_LK_SMP_LTK)
+			continue;
+
+		if (k->dlen != sizeof(*id))
+			continue;
+
+		id = (void *) &k->data;
+		if (id->ediv == ediv &&
+				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
+			return k;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(hci_find_ltk);
+
+struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
+					bdaddr_t *bdaddr, u8 type)
+{
+	struct link_key *k;
+
+	list_for_each_entry(k, &hdev->link_keys, list)
+		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
+			return k;
+
+	return NULL;
+}
+EXPORT_SYMBOL(hci_find_link_key_type);
+
 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
@@ -1111,6 +1148,44 @@
 	return 0;
 }
 
+int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
+{
+	struct link_key *key, *old_key;
+	struct key_master_id *id;
+	u8 old_key_type;
+
+	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
+
+	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
+	if (old_key) {
+		key = old_key;
+		old_key_type = old_key->type;
+	} else {
+		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
+		if (!key)
+			return -ENOMEM;
+		list_add(&key->list, &hdev->link_keys);
+		old_key_type = 0xff;
+	}
+
+	key->dlen = sizeof(*id);
+
+	bacpy(&key->bdaddr, bdaddr);
+	memcpy(key->val, ltk, sizeof(key->val));
+	key->type = HCI_LK_SMP_LTK;
+	key->pin_len = key_size;
+
+	id = (void *) &key->data;
+	id->ediv = ediv;
+	memcpy(id->rand, rand, sizeof(id->rand));
+
+	if (new_key)
+		mgmt_new_key(hdev->id, key, old_key_type);
+
+	return 0;
+}
+
 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	struct link_key *key;
@@ -1202,6 +1277,169 @@
 	return 0;
 }
 
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+						bdaddr_t *bdaddr)
+{
+	struct list_head *p;
+
+	list_for_each(p, &hdev->blacklist) {
+		struct bdaddr_list *b;
+
+		b = list_entry(p, struct bdaddr_list, list);
+
+		if (bacmp(bdaddr, &b->bdaddr) == 0)
+			return b;
+	}
+
+	return NULL;
+}
+
+int hci_blacklist_clear(struct hci_dev *hdev)
+{
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &hdev->blacklist) {
+		struct bdaddr_list *b;
+
+		b = list_entry(p, struct bdaddr_list, list);
+
+		list_del(p);
+		kfree(b);
+	}
+
+	return 0;
+}
+
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct bdaddr_list *entry;
+	int err;
+
+	if (bacmp(bdaddr, BDADDR_ANY) == 0)
+		return -EBADF;
+
+	hci_dev_lock_bh(hdev);
+
+	if (hci_blacklist_lookup(hdev, bdaddr)) {
+		err = -EEXIST;
+		goto err;
+	}
+
+	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+	if (!entry) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	bacpy(&entry->bdaddr, bdaddr);
+
+	list_add(&entry->list, &hdev->blacklist);
+
+	err = 0;
+
+err:
+	hci_dev_unlock_bh(hdev);
+	return err;
+}
+
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct bdaddr_list *entry;
+	int err = 0;
+
+	hci_dev_lock_bh(hdev);
+
+	if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+		hci_blacklist_clear(hdev);
+		goto done;
+	}
+
+	entry = hci_blacklist_lookup(hdev, bdaddr);
+	if (!entry) {
+		err = -ENOENT;
+		goto done;
+	}
+
+	list_del(&entry->list);
+	kfree(entry);
+
+done:
+	hci_dev_unlock_bh(hdev);
+	return err;
+}
+
+static void hci_clear_adv_cache(unsigned long arg)
+{
+	struct hci_dev *hdev = (void *) arg;
+
+	hci_dev_lock(hdev);
+
+	hci_adv_entries_clear(hdev);
+
+	hci_dev_unlock(hdev);
+}
+
+int hci_adv_entries_clear(struct hci_dev *hdev)
+{
+	struct adv_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	BT_DBG("%s adv cache cleared", hdev->name);
+
+	return 0;
+}
+
+struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct adv_entry *entry;
+
+	list_for_each_entry(entry, &hdev->adv_entries, list)
+		if (bacmp(bdaddr, &entry->bdaddr) == 0)
+			return entry;
+
+	return NULL;
+}
+
+static inline int is_connectable_adv(u8 evt_type)
+{
+	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
+		return 1;
+
+	return 0;
+}
+
+int hci_add_adv_entry(struct hci_dev *hdev,
+					struct hci_ev_le_advertising_info *ev)
+{
+	struct adv_entry *entry;
+
+	if (!is_connectable_adv(ev->evt_type))
+		return -EINVAL;
+
+	/* Only new entries should be added to adv_entries. So, if
+	 * bdaddr was found, don't add it. */
+	if (hci_find_adv_entry(hdev, &ev->bdaddr))
+		return 0;
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return -ENOMEM;
+
+	bacpy(&entry->bdaddr, &ev->bdaddr);
+	entry->bdaddr_type = ev->bdaddr_type;
+
+	list_add(&entry->list, &hdev->adv_entries);
+
+	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
+				batostr(&entry->bdaddr), entry->bdaddr_type);
+
+	return 0;
+}
+
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
@@ -1268,6 +1506,10 @@
 
 	INIT_LIST_HEAD(&hdev->remote_oob_data);
 
+	INIT_LIST_HEAD(&hdev->adv_entries);
+	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
+						(unsigned long) hdev);
+
 	INIT_WORK(&hdev->power_on, hci_power_on);
 	INIT_WORK(&hdev->power_off, hci_power_off);
 	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1282,6 +1524,11 @@
 	if (!hdev->workqueue)
 		goto nomem;
 
+	hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hdev->tfm))
+		BT_INFO("Failed to load transform for ecb(aes): %ld",
+							PTR_ERR(hdev->tfm));
+
 	hci_register_sysfs(hdev);
 
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1330,6 +1577,9 @@
 					!test_bit(HCI_SETUP, &hdev->flags))
 		mgmt_index_removed(hdev->id);
 
+	if (!IS_ERR(hdev->tfm))
+		crypto_free_blkcipher(hdev->tfm);
+
 	hci_notify(hdev, HCI_DEV_UNREG);
 
 	if (hdev->rfkill) {
@@ -1340,6 +1590,7 @@
 	hci_unregister_sysfs(hdev);
 
 	hci_del_off_timer(hdev);
+	del_timer(&hdev->adv_timer);
 
 	destroy_workqueue(hdev->workqueue);
 
@@ -1348,6 +1599,7 @@
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
+	hci_adv_entries_clear(hdev);
 	hci_dev_unlock_bh(hdev);
 
 	__hci_dev_put(hdev);
@@ -1891,7 +2143,7 @@
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
 
-			hci_conn_enter_active_mode(conn);
+			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
 
 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;
@@ -2030,7 +2282,7 @@
 	if (conn) {
 		register struct hci_proto *hp;
 
-		hci_conn_enter_active_mode(conn);
+		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
 
 		/* Send to upper protocol */
 		hp = hci_proto[HCI_PROTO_L2CAP];
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 77930aa..6cddd03 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,6 +45,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+static int enable_le;
+
 /* Handle HCI Event packets */
 
 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -525,6 +527,20 @@
 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
 }
 
+static void hci_set_le_support(struct hci_dev *hdev)
+{
+	struct hci_cp_write_le_host_supported cp;
+
+	memset(&cp, 0, sizeof(cp));
+
+	if (enable_le) {
+		cp.le = 1;
+		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+	}
+
+	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
+}
+
 static void hci_setup(struct hci_dev *hdev)
 {
 	hci_setup_event_mask(hdev);
@@ -542,6 +558,17 @@
 
 	if (hdev->features[7] & LMP_INQ_TX_PWR)
 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+
+	if (hdev->features[7] & LMP_EXTFEATURES) {
+		struct hci_cp_read_local_ext_features cp;
+
+		cp.page = 0x01;
+		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
+							sizeof(cp), &cp);
+	}
+
+	if (hdev->features[4] & LMP_LE)
+		hci_set_le_support(hdev);
 }
 
 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -658,6 +685,21 @@
 					hdev->features[6], hdev->features[7]);
 }
 
+static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	memcpy(hdev->extfeatures, rp->features, 8);
+
+	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
+}
+
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -841,6 +883,72 @@
 						rp->randomizer, rp->status);
 }
 
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+					struct sk_buff *skb)
+{
+	struct hci_cp_le_set_scan_enable *cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+	if (!cp)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (cp->enable == 0x01) {
+		del_timer(&hdev->adv_timer);
+		hci_adv_entries_clear(hdev);
+	} else if (cp->enable == 0x00) {
+		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
+}
+
+static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
+}
+
+static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_cp_read_local_ext_features cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp.page = 0x01;
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
+}
+
 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%x", hdev->name, status);
@@ -885,7 +993,7 @@
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
+			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
 			if (conn) {
 				conn->out = 1;
 				conn->link_mode |= HCI_LM_MASTER;
@@ -1208,17 +1316,24 @@
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-			if (conn)
+			conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
+			if (conn) {
+				conn->dst_type = cp->peer_addr_type;
 				conn->out = 1;
-			else
+			} else {
 				BT_ERR("No memory for new connection");
+			}
 		}
 	}
 
 	hci_dev_unlock(hdev);
 }
 
+static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
+{
+	BT_DBG("%s status 0x%x", hdev->name, status);
+}
+
 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -1372,7 +1487,8 @@
 
 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
 		if (!conn) {
-			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+			/* pkt_type not yet used for incoming connections */
+			conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
 			if (!conn) {
 				BT_ERR("No memory for new connection");
 				hci_dev_unlock(hdev);
@@ -1462,51 +1578,58 @@
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-	if (conn) {
-		if (!ev->status) {
+	if (!conn)
+		goto unlock;
+
+	if (!ev->status) {
+		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
+				test_bit(HCI_CONN_REAUTH_PEND,	&conn->pend)) {
+			BT_INFO("re-auth of legacy device is not possible.");
+		} else {
 			conn->link_mode |= HCI_LM_AUTH;
 			conn->sec_level = conn->pending_sec_level;
-		} else {
-			mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
 		}
+	} else {
+		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+	}
 
-		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
 
-		if (conn->state == BT_CONFIG) {
-			if (!ev->status && hdev->ssp_mode > 0 &&
-							conn->ssp_mode > 0) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle  = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				conn->state = BT_CONNECTED;
-				hci_proto_connect_cfm(conn, ev->status);
-				hci_conn_put(conn);
-			}
+	if (conn->state == BT_CONFIG) {
+		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle  = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
 		} else {
-			hci_auth_cfm(conn, ev->status);
-
-			hci_conn_hold(conn);
-			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+			conn->state = BT_CONNECTED;
+			hci_proto_connect_cfm(conn, ev->status);
 			hci_conn_put(conn);
 		}
+	} else {
+		hci_auth_cfm(conn, ev->status);
 
-		if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
-			if (!ev->status) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle  = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
-				hci_encrypt_cfm(conn, ev->status, 0x00);
-			}
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+		if (!ev->status) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle  = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
+		} else {
+			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+			hci_encrypt_cfm(conn, ev->status, 0x00);
 		}
 	}
 
+unlock:
 	hci_dev_unlock(hdev);
 }
 
@@ -1557,6 +1680,7 @@
 				/* Encryption implies authentication */
 				conn->link_mode |= HCI_LM_AUTH;
 				conn->link_mode |= HCI_LM_ENCRYPT;
+				conn->sec_level = conn->pending_sec_level;
 			} else
 				conn->link_mode &= ~HCI_LM_ENCRYPT;
 		}
@@ -1760,6 +1884,10 @@
 		hci_cc_read_local_features(hdev, skb);
 		break;
 
+	case HCI_OP_READ_LOCAL_EXT_FEATURES:
+		hci_cc_read_local_ext_features(hdev, skb);
+		break;
+
 	case HCI_OP_READ_BUFFER_SIZE:
 		hci_cc_read_buffer_size(hdev, skb);
 		break;
@@ -1816,6 +1944,22 @@
 		hci_cc_user_confirm_neg_reply(hdev, skb);
 		break;
 
+	case HCI_OP_LE_SET_SCAN_ENABLE:
+		hci_cc_le_set_scan_enable(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_REPLY:
+		hci_cc_le_ltk_reply(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_NEG_REPLY:
+		hci_cc_le_ltk_neg_reply(hdev, skb);
+		break;
+
+	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
+		hci_cc_write_le_host_supported(hdev, skb);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -1894,6 +2038,10 @@
 		hci_cs_le_create_conn(hdev, ev->status);
 		break;
 
+	case HCI_OP_LE_START_ENC:
+		hci_cs_le_start_enc(hdev, ev->status);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -2333,6 +2481,7 @@
 		hci_conn_add_sysfs(conn);
 		break;
 
+	case 0x10:	/* Connection Accept Timeout */
 	case 0x11:	/* Unsupported Feature or Parameter Value */
 	case 0x1c:	/* SCO interval rejected */
 	case 0x1a:	/* Unsupported Remote Feature */
@@ -2652,12 +2801,14 @@
 
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
 	if (!conn) {
-		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+		conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
 		if (!conn) {
 			BT_ERR("No memory for new connection");
 			hci_dev_unlock(hdev);
 			return;
 		}
+
+		conn->dst_type = ev->bdaddr_type;
 	}
 
 	if (ev->status) {
@@ -2670,6 +2821,7 @@
 
 	mgmt_connected(hdev->id, &ev->bdaddr);
 
+	conn->sec_level = BT_SECURITY_LOW;
 	conn->handle = __le16_to_cpu(ev->handle);
 	conn->state = BT_CONNECTED;
 
@@ -2682,6 +2834,64 @@
 	hci_dev_unlock(hdev);
 }
 
+static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_advertising_info *ev;
+	u8 num_reports;
+
+	num_reports = skb->data[0];
+	ev = (void *) &skb->data[1];
+
+	hci_dev_lock(hdev);
+
+	hci_add_adv_entry(hdev, ev);
+
+	while (--num_reports) {
+		ev = (void *) (ev->data + ev->length + 1);
+		hci_add_adv_entry(hdev, ev);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
+	struct hci_cp_le_ltk_reply cp;
+	struct hci_cp_le_ltk_neg_reply neg;
+	struct hci_conn *conn;
+	struct link_key *ltk;
+
+	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (conn == NULL)
+		goto not_found;
+
+	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
+	if (ltk == NULL)
+		goto not_found;
+
+	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
+	cp.handle = cpu_to_le16(conn->handle);
+	conn->pin_length = ltk->pin_len;
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
+	hci_dev_unlock(hdev);
+
+	return;
+
+not_found:
+	neg.handle = ev->handle;
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
+	hci_dev_unlock(hdev);
+}
+
 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -2693,6 +2903,14 @@
 		hci_le_conn_complete_evt(hdev, skb);
 		break;
 
+	case HCI_EV_LE_ADVERTISING_REPORT:
+		hci_le_adv_report_evt(hdev, skb);
+		break;
+
+	case HCI_EV_LE_LTK_REQ:
+		hci_le_ltk_request_evt(hdev, skb);
+		break;
+
 	default:
 		break;
 	}
@@ -2886,3 +3104,6 @@
 	hci_send_to_sock(hdev, skb, NULL);
 	kfree_skb(skb);
 }
+
+module_param(enable_le, bool, 0444);
+MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 295e4a8..ff02cf5 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -180,82 +180,24 @@
 	return 0;
 }
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
-	struct list_head *p;
-
-	list_for_each(p, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
-
-		if (bacmp(bdaddr, &b->bdaddr) == 0)
-			return b;
-	}
-
-	return NULL;
-}
-
-static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 {
 	bdaddr_t bdaddr;
-	struct bdaddr_list *entry;
 
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
-		return -EBADF;
-
-	if (hci_blacklist_lookup(hdev, &bdaddr))
-		return -EEXIST;
-
-	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
-	bacpy(&entry->bdaddr, &bdaddr);
-
-	list_add(&entry->list, &hdev->blacklist);
-
-	return 0;
+	return hci_blacklist_add(hdev, &bdaddr);
 }
 
-int hci_blacklist_clear(struct hci_dev *hdev)
-{
-	struct list_head *p, *n;
-
-	list_for_each_safe(p, n, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
-
-		list_del(p);
-		kfree(b);
-	}
-
-	return 0;
-}
-
-static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 {
 	bdaddr_t bdaddr;
-	struct bdaddr_list *entry;
 
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
-		return hci_blacklist_clear(hdev);
-
-	entry = hci_blacklist_lookup(hdev, &bdaddr);
-	if (!entry)
-		return -ENOENT;
-
-	list_del(&entry->list);
-	kfree(entry);
-
-	return 0;
+	return hci_blacklist_del(hdev, &bdaddr);
 }
 
 /* Ioctls that require bound socket */
@@ -290,12 +232,12 @@
 	case HCIBLOCKADDR:
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
-		return hci_blacklist_add(hdev, (void __user *) arg);
+		return hci_sock_blacklist_add(hdev, (void __user *) arg);
 
 	case HCIUNBLOCKADDR:
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
-		return hci_blacklist_del(hdev, (void __user *) arg);
+		return hci_sock_blacklist_del(hdev, (void __user *) arg);
 
 	default:
 		if (hdev->ioctl)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 56fdd91..9c7bccf 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -54,26 +54,39 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/smp.h>
 
 int disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { 0x02, };
 
-static struct workqueue_struct *_busy_wq;
-
-LIST_HEAD(chan_list);
-DEFINE_RWLOCK(chan_list_lock);
-
-static void l2cap_busy_work(struct work_struct *work);
+static LIST_HEAD(chan_list);
+static DEFINE_RWLOCK(chan_list_lock);
 
 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 				u8 code, u8 ident, u16 dlen, void *data);
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+								void *data);
 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static void l2cap_send_disconn_req(struct l2cap_conn *conn,
+				struct l2cap_chan *chan, int err);
 
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
 
 /* ---- L2CAP channels ---- */
+
+static inline void chan_hold(struct l2cap_chan *c)
+{
+	atomic_inc(&c->refcnt);
+}
+
+static inline void chan_put(struct l2cap_chan *c)
+{
+	if (atomic_dec_and_test(&c->refcnt))
+		kfree(c);
+}
+
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
 {
 	struct l2cap_chan *c;
@@ -204,6 +217,62 @@
 	return 0;
 }
 
+static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
+{
+	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+
+	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
+		chan_hold(chan);
+}
+
+static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
+{
+	BT_DBG("chan %p state %d", chan, chan->state);
+
+	if (timer_pending(timer) && del_timer(timer))
+		chan_put(chan);
+}
+
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
+{
+	chan->state = state;
+	chan->ops->state_change(chan->data, state);
+}
+
+static void l2cap_chan_timeout(unsigned long arg)
+{
+	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
+	struct sock *sk = chan->sk;
+	int reason;
+
+	BT_DBG("chan %p state %d", chan, chan->state);
+
+	bh_lock_sock(sk);
+
+	if (sock_owned_by_user(sk)) {
+		/* sk is owned by user. Try again later */
+		__set_chan_timer(chan, HZ / 5);
+		bh_unlock_sock(sk);
+		chan_put(chan);
+		return;
+	}
+
+	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+		reason = ECONNREFUSED;
+	else if (chan->state == BT_CONNECT &&
+					chan->sec_level != BT_SECURITY_SDP)
+		reason = ECONNREFUSED;
+	else
+		reason = ETIMEDOUT;
+
+	l2cap_chan_close(chan, reason);
+
+	bh_unlock_sock(sk);
+
+	chan->ops->close(chan->data);
+	chan_put(chan);
+}
+
 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
 {
 	struct l2cap_chan *chan;
@@ -218,6 +287,12 @@
 	list_add(&chan->global_l, &chan_list);
 	write_unlock_bh(&chan_list_lock);
 
+	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
+
+	chan->state = BT_OPEN;
+
+	atomic_set(&chan->refcnt, 1);
+
 	return chan;
 }
 
@@ -227,13 +302,11 @@
 	list_del(&chan->global_l);
 	write_unlock_bh(&chan_list_lock);
 
-	kfree(chan);
+	chan_put(chan);
 }
 
 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
-	struct sock *sk = chan->sk;
-
 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
 			chan->psm, chan->dcid);
 
@@ -241,7 +314,7 @@
 
 	chan->conn = conn;
 
-	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
+	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
 		if (conn->hcon->type == LE_LINK) {
 			/* LE connection */
 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -252,7 +325,7 @@
 			chan->scid = l2cap_alloc_cid(conn);
 			chan->omtu = L2CAP_DEFAULT_MTU;
 		}
-	} else if (sk->sk_type == SOCK_DGRAM) {
+	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
 		/* Connectionless socket */
 		chan->scid = L2CAP_CID_CONN_LESS;
 		chan->dcid = L2CAP_CID_CONN_LESS;
@@ -264,20 +337,20 @@
 		chan->omtu = L2CAP_DEFAULT_MTU;
 	}
 
-	sock_hold(sk);
+	chan_hold(chan);
 
 	list_add(&chan->list, &conn->chan_l);
 }
 
 /* Delete channel.
  * Must be called on the locked socket. */
-void l2cap_chan_del(struct l2cap_chan *chan, int err)
+static void l2cap_chan_del(struct l2cap_chan *chan, int err)
 {
 	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
 	struct sock *parent = bt_sk(sk)->parent;
 
-	l2cap_sock_clear_timer(sk);
+	__clear_chan_timer(chan);
 
 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
 
@@ -286,13 +359,13 @@
 		write_lock_bh(&conn->chan_lock);
 		list_del(&chan->list);
 		write_unlock_bh(&conn->chan_lock);
-		__sock_put(sk);
+		chan_put(chan);
 
 		chan->conn = NULL;
 		hci_conn_put(conn->hcon);
 	}
 
-	sk->sk_state = BT_CLOSED;
+	l2cap_state_change(chan, BT_CLOSED);
 	sock_set_flag(sk, SOCK_ZAPPED);
 
 	if (err)
@@ -304,8 +377,8 @@
 	} else
 		sk->sk_state_change(sk);
 
-	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
-			chan->conf_state & L2CAP_CONF_INPUT_DONE))
+	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
+			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
 		return;
 
 	skb_queue_purge(&chan->tx_q);
@@ -313,12 +386,11 @@
 	if (chan->mode == L2CAP_MODE_ERTM) {
 		struct srej_list *l, *tmp;
 
-		del_timer(&chan->retrans_timer);
-		del_timer(&chan->monitor_timer);
-		del_timer(&chan->ack_timer);
+		__clear_retrans_timer(chan);
+		__clear_monitor_timer(chan);
+		__clear_ack_timer(chan);
 
 		skb_queue_purge(&chan->srej_q);
-		skb_queue_purge(&chan->busy_q);
 
 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
 			list_del(&l->list);
@@ -327,11 +399,86 @@
 	}
 }
 
-static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
+static void l2cap_chan_cleanup_listen(struct sock *parent)
 {
+	struct sock *sk;
+
+	BT_DBG("parent %p", parent);
+
+	/* Close not yet accepted channels */
+	while ((sk = bt_accept_dequeue(parent, NULL))) {
+		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+		__clear_chan_timer(chan);
+		lock_sock(sk);
+		l2cap_chan_close(chan, ECONNRESET);
+		release_sock(sk);
+		chan->ops->close(chan->data);
+	}
+}
+
+void l2cap_chan_close(struct l2cap_chan *chan, int reason)
+{
+	struct l2cap_conn *conn = chan->conn;
 	struct sock *sk = chan->sk;
 
-	if (sk->sk_type == SOCK_RAW) {
+	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
+
+	switch (chan->state) {
+	case BT_LISTEN:
+		l2cap_chan_cleanup_listen(sk);
+
+		l2cap_state_change(chan, BT_CLOSED);
+		sock_set_flag(sk, SOCK_ZAPPED);
+		break;
+
+	case BT_CONNECTED:
+	case BT_CONFIG:
+		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+					conn->hcon->type == ACL_LINK) {
+			__clear_chan_timer(chan);
+			__set_chan_timer(chan, sk->sk_sndtimeo);
+			l2cap_send_disconn_req(conn, chan, reason);
+		} else
+			l2cap_chan_del(chan, reason);
+		break;
+
+	case BT_CONNECT2:
+		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+					conn->hcon->type == ACL_LINK) {
+			struct l2cap_conn_rsp rsp;
+			__u16 result;
+
+			if (bt_sk(sk)->defer_setup)
+				result = L2CAP_CR_SEC_BLOCK;
+			else
+				result = L2CAP_CR_BAD_PSM;
+			l2cap_state_change(chan, BT_DISCONN);
+
+			rsp.scid   = cpu_to_le16(chan->dcid);
+			rsp.dcid   = cpu_to_le16(chan->scid);
+			rsp.result = cpu_to_le16(result);
+			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+							sizeof(rsp), &rsp);
+		}
+
+		l2cap_chan_del(chan, reason);
+		break;
+
+	case BT_CONNECT:
+	case BT_DISCONN:
+		l2cap_chan_del(chan, reason);
+		break;
+
+	default:
+		sock_set_flag(sk, SOCK_ZAPPED);
+		break;
+	}
+}
+
+static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
+{
+	if (chan->chan_type == L2CAP_CHAN_RAW) {
 		switch (chan->sec_level) {
 		case BT_SECURITY_HIGH:
 			return HCI_AT_DEDICATED_BONDING_MITM;
@@ -371,7 +518,7 @@
 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
 }
 
-u8 l2cap_get_ident(struct l2cap_conn *conn)
+static u8 l2cap_get_ident(struct l2cap_conn *conn)
 {
 	u8 id;
 
@@ -393,7 +540,7 @@
 	return id;
 }
 
-void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
 {
 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
 	u8 flags;
@@ -408,6 +555,8 @@
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+
 	hci_send_acl(conn->hcon, skb, flags);
 }
 
@@ -415,13 +564,11 @@
 {
 	struct sk_buff *skb;
 	struct l2cap_hdr *lh;
-	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
 	struct l2cap_conn *conn = chan->conn;
-	struct sock *sk = (struct sock *)pi;
 	int count, hlen = L2CAP_HDR_SIZE + 2;
 	u8 flags;
 
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
 		return;
 
 	if (chan->fcs == L2CAP_FCS_CRC16)
@@ -432,15 +579,11 @@
 	count = min_t(unsigned int, conn->mtu, hlen);
 	control |= L2CAP_CTRL_FRAME_TYPE;
 
-	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_FINAL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-	}
 
-	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
+	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_POLL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
-	}
 
 	skb = bt_skb_alloc(count, GFP_ATOMIC);
 	if (!skb)
@@ -461,14 +604,16 @@
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = chan->force_active;
+
 	hci_send_acl(chan->conn->hcon, skb, flags);
 }
 
 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
 {
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
 	} else
 		control |= L2CAP_SUPER_RCV_READY;
 
@@ -479,7 +624,7 @@
 
 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
 {
-	return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
+	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
 }
 
 static void l2cap_do_start(struct l2cap_chan *chan)
@@ -497,7 +642,7 @@
 			req.psm  = chan->psm;
 
 			chan->ident = l2cap_get_ident(conn);
-			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
 							sizeof(req), &req);
@@ -533,7 +678,7 @@
 	}
 }
 
-void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
+static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
 {
 	struct sock *sk;
 	struct l2cap_disconn_req req;
@@ -544,9 +689,9 @@
 	sk = chan->sk;
 
 	if (chan->mode == L2CAP_MODE_ERTM) {
-		del_timer(&chan->retrans_timer);
-		del_timer(&chan->monitor_timer);
-		del_timer(&chan->ack_timer);
+		__clear_retrans_timer(chan);
+		__clear_monitor_timer(chan);
+		__clear_ack_timer(chan);
 	}
 
 	req.dcid = cpu_to_le16(chan->dcid);
@@ -554,7 +699,7 @@
 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
 			L2CAP_DISCONN_REQ, sizeof(req), &req);
 
-	sk->sk_state = BT_DISCONN;
+	l2cap_state_change(chan, BT_DISCONN);
 	sk->sk_err = err;
 }
 
@@ -572,13 +717,12 @@
 
 		bh_lock_sock(sk);
 
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
 			bh_unlock_sock(sk);
 			continue;
 		}
 
-		if (sk->sk_state == BT_CONNECT) {
+		if (chan->state == BT_CONNECT) {
 			struct l2cap_conn_req req;
 
 			if (!l2cap_check_security(chan) ||
@@ -587,15 +731,14 @@
 				continue;
 			}
 
-			if (!l2cap_mode_supported(chan->mode,
-					conn->feat_mask)
-					&& chan->conf_state &
-					L2CAP_CONF_STATE2_DEVICE) {
-				/* __l2cap_sock_close() calls list_del(chan)
+			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
+					&& test_bit(CONF_STATE2_DEVICE,
+					&chan->conf_state)) {
+				/* l2cap_chan_close() calls list_del(chan)
 				 * so release the lock */
-				read_unlock_bh(&conn->chan_lock);
-				 __l2cap_sock_close(sk, ECONNRESET);
-				read_lock_bh(&conn->chan_lock);
+				read_unlock(&conn->chan_lock);
+				l2cap_chan_close(chan, ECONNRESET);
+				read_lock(&conn->chan_lock);
 				bh_unlock_sock(sk);
 				continue;
 			}
@@ -604,12 +747,12 @@
 			req.psm  = chan->psm;
 
 			chan->ident = l2cap_get_ident(conn);
-			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
 							sizeof(req), &req);
 
-		} else if (sk->sk_state == BT_CONNECT2) {
+		} else if (chan->state == BT_CONNECT2) {
 			struct l2cap_conn_rsp rsp;
 			char buf[128];
 			rsp.scid = cpu_to_le16(chan->dcid);
@@ -620,10 +763,11 @@
 					struct sock *parent = bt_sk(sk)->parent;
 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-					parent->sk_data_ready(parent, 0);
+					if (parent)
+						parent->sk_data_ready(parent, 0);
 
 				} else {
-					sk->sk_state = BT_CONFIG;
+					l2cap_state_change(chan, BT_CONFIG);
 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
 				}
@@ -635,13 +779,13 @@
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
 							sizeof(rsp), &rsp);
 
-			if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
+			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
 					rsp.result != L2CAP_CR_SUCCESS) {
 				bh_unlock_sock(sk);
 				continue;
 			}
 
-			chan->conf_state |= L2CAP_CONF_REQ_SENT;
+			set_bit(CONF_REQ_SENT, &chan->conf_state);
 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
 						l2cap_build_conf_req(chan, buf), buf);
 			chan->num_conf_req++;
@@ -665,7 +809,7 @@
 	list_for_each_entry(c, &chan_list, global_l) {
 		struct sock *sk = c->sk;
 
-		if (state && sk->sk_state != state)
+		if (state && c->state != state)
 			continue;
 
 		if (c->scid == cid) {
@@ -709,24 +853,16 @@
 		goto clean;
 	}
 
-	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
-	if (!sk)
+	chan = pchan->ops->new_connection(pchan->data);
+	if (!chan)
 		goto clean;
 
-	chan = l2cap_chan_create(sk);
-	if (!chan) {
-		l2cap_sock_kill(sk);
-		goto clean;
-	}
-
-	l2cap_pi(sk)->chan = chan;
+	sk = chan->sk;
 
 	write_lock_bh(&conn->chan_lock);
 
 	hci_conn_hold(conn->hcon);
 
-	l2cap_sock_init(sk, parent);
-
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
 
@@ -734,9 +870,9 @@
 
 	__l2cap_chan_add(conn, chan);
 
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
 
-	sk->sk_state = BT_CONNECTED;
+	l2cap_state_change(chan, BT_CONNECTED);
 	parent->sk_data_ready(parent, 0);
 
 	write_unlock_bh(&conn->chan_lock);
@@ -745,6 +881,23 @@
 	bh_unlock_sock(parent);
 }
 
+static void l2cap_chan_ready(struct sock *sk)
+{
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+	struct sock *parent = bt_sk(sk)->parent;
+
+	BT_DBG("sk %p, parent %p", sk, parent);
+
+	chan->conf_state = 0;
+	__clear_chan_timer(chan);
+
+	l2cap_state_change(chan, BT_CONNECTED);
+	sk->sk_state_change(sk);
+
+	if (parent)
+		parent->sk_data_ready(parent, 0);
+}
+
 static void l2cap_conn_ready(struct l2cap_conn *conn)
 {
 	struct l2cap_chan *chan;
@@ -762,17 +915,15 @@
 		bh_lock_sock(sk);
 
 		if (conn->hcon->type == LE_LINK) {
-			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
-			sk->sk_state_change(sk);
-		}
+			if (smp_conn_security(conn, chan->sec_level))
+				l2cap_chan_ready(sk);
 
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
-			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
+		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+			__clear_chan_timer(chan);
+			l2cap_state_change(chan, BT_CONNECTED);
 			sk->sk_state_change(sk);
-		} else if (sk->sk_state == BT_CONNECT)
+
+		} else if (chan->state == BT_CONNECT)
 			l2cap_do_start(chan);
 
 		bh_unlock_sock(sk);
@@ -810,6 +961,45 @@
 	l2cap_conn_start(conn);
 }
 
+static void l2cap_conn_del(struct hci_conn *hcon, int err)
+{
+	struct l2cap_conn *conn = hcon->l2cap_data;
+	struct l2cap_chan *chan, *l;
+	struct sock *sk;
+
+	if (!conn)
+		return;
+
+	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+	kfree_skb(conn->rx_skb);
+
+	/* Kill channels */
+	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+		sk = chan->sk;
+		bh_lock_sock(sk);
+		l2cap_chan_del(chan, err);
+		bh_unlock_sock(sk);
+		chan->ops->close(chan->data);
+	}
+
+	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+		del_timer_sync(&conn->info_timer);
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+		del_timer(&conn->security_timer);
+
+	hcon->l2cap_data = NULL;
+	kfree(conn);
+}
+
+static void security_timeout(unsigned long arg)
+{
+	struct l2cap_conn *conn = (void *) arg;
+
+	l2cap_conn_del(conn->hcon, ETIMEDOUT);
+}
+
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
@@ -841,7 +1031,10 @@
 
 	INIT_LIST_HEAD(&conn->chan_l);
 
-	if (hcon->type != LE_LINK)
+	if (hcon->type == LE_LINK)
+		setup_timer(&conn->security_timer, security_timeout,
+						(unsigned long) conn);
+	else
 		setup_timer(&conn->info_timer, l2cap_info_timeout,
 						(unsigned long) conn);
 
@@ -850,35 +1043,6 @@
 	return conn;
 }
 
-static void l2cap_conn_del(struct hci_conn *hcon, int err)
-{
-	struct l2cap_conn *conn = hcon->l2cap_data;
-	struct l2cap_chan *chan, *l;
-	struct sock *sk;
-
-	if (!conn)
-		return;
-
-	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
-	kfree_skb(conn->rx_skb);
-
-	/* Kill channels */
-	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
-		sk = chan->sk;
-		bh_lock_sock(sk);
-		l2cap_chan_del(chan, err);
-		bh_unlock_sock(sk);
-		l2cap_sock_kill(sk);
-	}
-
-	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
-		del_timer_sync(&conn->info_timer);
-
-	hcon->l2cap_data = NULL;
-	kfree(conn);
-}
-
 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
 	write_lock_bh(&conn->chan_lock);
@@ -900,7 +1064,7 @@
 	list_for_each_entry(c, &chan_list, global_l) {
 		struct sock *sk = c->sk;
 
-		if (state && sk->sk_state != state)
+		if (state && c->state != state)
 			continue;
 
 		if (c->psm == psm) {
@@ -944,10 +1108,10 @@
 	auth_type = l2cap_get_auth_type(chan);
 
 	if (chan->dcid == L2CAP_CID_LE_DATA)
-		hcon = hci_connect(hdev, LE_LINK, dst,
+		hcon = hci_connect(hdev, LE_LINK, 0, dst,
 					chan->sec_level, auth_type);
 	else
-		hcon = hci_connect(hdev, ACL_LINK, dst,
+		hcon = hci_connect(hdev, ACL_LINK, 0, dst,
 					chan->sec_level, auth_type);
 
 	if (IS_ERR(hcon)) {
@@ -967,15 +1131,14 @@
 
 	l2cap_chan_add(conn, chan);
 
-	sk->sk_state = BT_CONNECT;
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	l2cap_state_change(chan, BT_CONNECT);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
 
 	if (hcon->state == BT_CONNECTED) {
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
-			l2cap_sock_clear_timer(sk);
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+			__clear_chan_timer(chan);
 			if (l2cap_check_security(chan))
-				sk->sk_state = BT_CONNECTED;
+				l2cap_state_change(chan, BT_CONNECTED);
 		} else
 			l2cap_do_start(chan);
 	}
@@ -1035,7 +1198,7 @@
 	}
 
 	chan->retry_count++;
-	__mod_monitor_timer();
+	__set_monitor_timer(chan);
 
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	bh_unlock_sock(sk);
@@ -1050,9 +1213,9 @@
 
 	bh_lock_sock(sk);
 	chan->retry_count = 1;
-	__mod_monitor_timer();
+	__set_monitor_timer(chan);
 
-	chan->conn_state |= L2CAP_CONN_WAIT_F;
+	set_bit(CONN_WAIT_F, &chan->conn_state);
 
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	bh_unlock_sock(sk);
@@ -1074,7 +1237,7 @@
 	}
 
 	if (!chan->unacked_frames)
-		del_timer(&chan->retrans_timer);
+		__clear_retrans_timer(chan);
 }
 
 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -1089,6 +1252,7 @@
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = chan->force_active;
 	hci_send_acl(hcon, skb, flags);
 }
 
@@ -1142,10 +1306,8 @@
 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 	control &= L2CAP_CTRL_SAR;
 
-	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_FINAL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-	}
 
 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
@@ -1163,11 +1325,10 @@
 int l2cap_ertm_send(struct l2cap_chan *chan)
 {
 	struct sk_buff *skb, *tx_skb;
-	struct sock *sk = chan->sk;
 	u16 control, fcs;
 	int nsent = 0;
 
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
 		return -ENOTCONN;
 
 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
@@ -1185,10 +1346,9 @@
 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 		control &= L2CAP_CTRL_SAR;
 
-		if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
 			control |= L2CAP_CTRL_FINAL;
-			chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-		}
+
 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1201,7 +1361,7 @@
 
 		l2cap_do_send(chan, tx_skb);
 
-		__mod_retrans_timer();
+		__set_retrans_timer(chan);
 
 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
@@ -1240,9 +1400,9 @@
 
 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
 		l2cap_send_sframe(chan, control);
 		return;
 	}
@@ -1450,28 +1610,83 @@
 	return size;
 }
 
-static void l2cap_chan_ready(struct sock *sk)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
 {
-	struct sock *parent = bt_sk(sk)->parent;
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+	struct sk_buff *skb;
+	u16 control;
+	int err;
 
-	BT_DBG("sk %p, parent %p", sk, parent);
+	/* Connectionless channel */
+	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
+		skb = l2cap_create_connless_pdu(chan, msg, len);
+		if (IS_ERR(skb))
+			return PTR_ERR(skb);
 
-	chan->conf_state = 0;
-	l2cap_sock_clear_timer(sk);
-
-	if (!parent) {
-		/* Outgoing channel.
-		 * Wake up socket sleeping on connect.
-		 */
-		sk->sk_state = BT_CONNECTED;
-		sk->sk_state_change(sk);
-	} else {
-		/* Incoming channel.
-		 * Wake up socket sleeping on accept.
-		 */
-		parent->sk_data_ready(parent, 0);
+		l2cap_do_send(chan, skb);
+		return len;
 	}
+
+	switch (chan->mode) {
+	case L2CAP_MODE_BASIC:
+		/* Check outgoing MTU */
+		if (len > chan->omtu)
+			return -EMSGSIZE;
+
+		/* Create a basic PDU */
+		skb = l2cap_create_basic_pdu(chan, msg, len);
+		if (IS_ERR(skb))
+			return PTR_ERR(skb);
+
+		l2cap_do_send(chan, skb);
+		err = len;
+		break;
+
+	case L2CAP_MODE_ERTM:
+	case L2CAP_MODE_STREAMING:
+		/* Entire SDU fits into one PDU */
+		if (len <= chan->remote_mps) {
+			control = L2CAP_SDU_UNSEGMENTED;
+			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
+									0);
+			if (IS_ERR(skb))
+				return PTR_ERR(skb);
+
+			__skb_queue_tail(&chan->tx_q, skb);
+
+			if (chan->tx_send_head == NULL)
+				chan->tx_send_head = skb;
+
+		} else {
+			/* Segment SDU into multiple PDUs */
+			err = l2cap_sar_segment_sdu(chan, msg, len);
+			if (err < 0)
+				return err;
+		}
+
+		if (chan->mode == L2CAP_MODE_STREAMING) {
+			l2cap_streaming_send(chan);
+			err = len;
+			break;
+		}
+
+		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
+				test_bit(CONN_WAIT_F, &chan->conn_state)) {
+			err = len;
+			break;
+		}
+
+		err = l2cap_ertm_send(chan);
+		if (err >= 0)
+			err = len;
+
+		break;
+
+	default:
+		BT_DBG("bad state %1.1x", chan->mode);
+		err = -EBADFD;
+	}
+
+	return err;
 }
 
 /* Copy frame to all raw sockets on that connection */
@@ -1485,7 +1700,7 @@
 	read_lock(&conn->chan_lock);
 	list_for_each_entry(chan, &conn->chan_l, list) {
 		struct sock *sk = chan->sk;
-		if (sk->sk_type != SOCK_RAW)
+		if (chan->chan_type != L2CAP_CHAN_RAW)
 			continue;
 
 		/* Don't send frame to the socket it came from */
@@ -1495,7 +1710,7 @@
 		if (!nskb)
 			continue;
 
-		if (sock_queue_rcv_skb(sk, nskb))
+		if (chan->ops->recv(chan->data, nskb))
 			kfree_skb(nskb);
 	}
 	read_unlock(&conn->chan_lock);
@@ -1654,11 +1869,9 @@
 	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
 
 	skb_queue_head_init(&chan->srej_q);
-	skb_queue_head_init(&chan->busy_q);
 
 	INIT_LIST_HEAD(&chan->srej_l);
 
-	INIT_WORK(&chan->busy_work, l2cap_busy_work);
 
 	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
 }
@@ -1690,7 +1903,7 @@
 	switch (chan->mode) {
 	case L2CAP_MODE_STREAMING:
 	case L2CAP_MODE_ERTM:
-		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
+		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
 			break;
 
 		/* fall through */
@@ -1737,7 +1950,7 @@
 			break;
 
 		if (chan->fcs == L2CAP_FCS_NONE ||
-				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
 			chan->fcs = L2CAP_FCS_NONE;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
 		}
@@ -1760,7 +1973,7 @@
 			break;
 
 		if (chan->fcs == L2CAP_FCS_NONE ||
-				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
 			chan->fcs = L2CAP_FCS_NONE;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
 		}
@@ -1812,7 +2025,7 @@
 
 		case L2CAP_CONF_FCS:
 			if (val == L2CAP_FCS_NONE)
-				chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
+				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
 
 			break;
 
@@ -1832,7 +2045,7 @@
 	switch (chan->mode) {
 	case L2CAP_MODE_STREAMING:
 	case L2CAP_MODE_ERTM:
-		if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
+		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
 			chan->mode = l2cap_select_mode(rfc.mode,
 					chan->conn->feat_mask);
 			break;
@@ -1865,14 +2078,14 @@
 			result = L2CAP_CONF_UNACCEPT;
 		else {
 			chan->omtu = mtu;
-			chan->conf_state |= L2CAP_CONF_MTU_DONE;
+			set_bit(CONF_MTU_DONE, &chan->conf_state);
 		}
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
 
 		switch (rfc.mode) {
 		case L2CAP_MODE_BASIC:
 			chan->fcs = L2CAP_FCS_NONE;
-			chan->conf_state |= L2CAP_CONF_MODE_DONE;
+			set_bit(CONF_MODE_DONE, &chan->conf_state);
 			break;
 
 		case L2CAP_MODE_ERTM:
@@ -1889,7 +2102,7 @@
 			rfc.monitor_timeout =
 				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
 
-			chan->conf_state |= L2CAP_CONF_MODE_DONE;
+			set_bit(CONF_MODE_DONE, &chan->conf_state);
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
@@ -1902,7 +2115,7 @@
 
 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
 
-			chan->conf_state |= L2CAP_CONF_MODE_DONE;
+			set_bit(CONF_MODE_DONE, &chan->conf_state);
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
@@ -1917,7 +2130,7 @@
 		}
 
 		if (result == L2CAP_CONF_SUCCESS)
-			chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
 	}
 	rsp->scid   = cpu_to_le16(chan->dcid);
 	rsp->result = cpu_to_le16(result);
@@ -1959,7 +2172,7 @@
 			if (olen == sizeof(rfc))
 				memcpy(&rfc, (void *)val, olen);
 
-			if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
 							rfc.mode != chan->mode)
 				return -ECONNREFUSED;
 
@@ -2021,10 +2234,9 @@
 	l2cap_send_cmd(conn, chan->ident,
 				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
 
-	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
+	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
 		return;
 
-	chan->conf_state |= L2CAP_CONF_REQ_SENT;
 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
 			l2cap_build_conf_req(chan, buf), buf);
 	chan->num_conf_req++;
@@ -2124,17 +2336,11 @@
 		goto response;
 	}
 
-	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
-	if (!sk)
+	chan = pchan->ops->new_connection(pchan->data);
+	if (!chan)
 		goto response;
 
-	chan = l2cap_chan_create(sk);
-	if (!chan) {
-		l2cap_sock_kill(sk);
-		goto response;
-	}
-
-	l2cap_pi(sk)->chan = chan;
+	sk = chan->sk;
 
 	write_lock_bh(&conn->chan_lock);
 
@@ -2142,13 +2348,12 @@
 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
 		write_unlock_bh(&conn->chan_lock);
 		sock_set_flag(sk, SOCK_ZAPPED);
-		l2cap_sock_kill(sk);
+		chan->ops->close(chan->data);
 		goto response;
 	}
 
 	hci_conn_hold(conn->hcon);
 
-	l2cap_sock_init(sk, parent);
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
 	chan->psm  = psm;
@@ -2160,29 +2365,29 @@
 
 	dcid = chan->scid;
 
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
 
 	chan->ident = cmd->ident;
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
 		if (l2cap_check_security(chan)) {
 			if (bt_sk(sk)->defer_setup) {
-				sk->sk_state = BT_CONNECT2;
+				l2cap_state_change(chan, BT_CONNECT2);
 				result = L2CAP_CR_PEND;
 				status = L2CAP_CS_AUTHOR_PEND;
 				parent->sk_data_ready(parent, 0);
 			} else {
-				sk->sk_state = BT_CONFIG;
+				l2cap_state_change(chan, BT_CONFIG);
 				result = L2CAP_CR_SUCCESS;
 				status = L2CAP_CS_NO_INFO;
 			}
 		} else {
-			sk->sk_state = BT_CONNECT2;
+			l2cap_state_change(chan, BT_CONNECT2);
 			result = L2CAP_CR_PEND;
 			status = L2CAP_CS_AUTHEN_PEND;
 		}
 	} else {
-		sk->sk_state = BT_CONNECT2;
+		l2cap_state_change(chan, BT_CONNECT2);
 		result = L2CAP_CR_PEND;
 		status = L2CAP_CS_NO_INFO;
 	}
@@ -2213,10 +2418,10 @@
 					L2CAP_INFO_REQ, sizeof(info), &info);
 	}
 
-	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
+	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
 				result == L2CAP_CR_SUCCESS) {
 		u8 buf[128];
-		chan->conf_state |= L2CAP_CONF_REQ_SENT;
+		set_bit(CONF_REQ_SENT, &chan->conf_state);
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
 					l2cap_build_conf_req(chan, buf), buf);
 		chan->num_conf_req++;
@@ -2254,31 +2459,29 @@
 
 	switch (result) {
 	case L2CAP_CR_SUCCESS:
-		sk->sk_state = BT_CONFIG;
+		l2cap_state_change(chan, BT_CONFIG);
 		chan->ident = 0;
 		chan->dcid = dcid;
-		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
+		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
 
-		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
+		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
 			break;
 
-		chan->conf_state |= L2CAP_CONF_REQ_SENT;
-
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
 					l2cap_build_conf_req(chan, req), req);
 		chan->num_conf_req++;
 		break;
 
 	case L2CAP_CR_PEND:
-		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 		break;
 
 	default:
 		/* don't delete l2cap channel if sk is owned by user */
 		if (sock_owned_by_user(sk)) {
-			sk->sk_state = BT_DISCONN;
-			l2cap_sock_clear_timer(sk);
-			l2cap_sock_set_timer(sk, HZ / 5);
+			l2cap_state_change(chan, BT_DISCONN);
+			__clear_chan_timer(chan);
+			__set_chan_timer(chan, HZ / 5);
 			break;
 		}
 
@@ -2292,14 +2495,12 @@
 
 static inline void set_default_fcs(struct l2cap_chan *chan)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
-
 	/* FCS is enabled only in ERTM or streaming mode, if one or both
 	 * sides request it.
 	 */
 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
 		chan->fcs = L2CAP_FCS_NONE;
-	else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
+	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
 		chan->fcs = L2CAP_FCS_CRC16;
 }
 
@@ -2323,7 +2524,7 @@
 
 	sk = chan->sk;
 
-	if (sk->sk_state != BT_CONFIG) {
+	if (chan->state != BT_CONFIG) {
 		struct l2cap_cmd_rej rej;
 
 		rej.reason = cpu_to_le16(0x0002);
@@ -2366,13 +2567,13 @@
 	/* Reset config buffer. */
 	chan->conf_len = 0;
 
-	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
+	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
 		goto unlock;
 
-	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
+	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
 		set_default_fcs(chan);
 
-		sk->sk_state = BT_CONNECTED;
+		l2cap_state_change(chan, BT_CONNECTED);
 
 		chan->next_tx_seq = 0;
 		chan->expected_tx_seq = 0;
@@ -2384,9 +2585,8 @@
 		goto unlock;
 	}
 
-	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
+	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
 		u8 buf[64];
-		chan->conf_state |= L2CAP_CONF_REQ_SENT;
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
 					l2cap_build_conf_req(chan, buf), buf);
 		chan->num_conf_req++;
@@ -2451,7 +2651,7 @@
 
 	default:
 		sk->sk_err = ECONNRESET;
-		l2cap_sock_set_timer(sk, HZ * 5);
+		__set_chan_timer(chan, HZ * 5);
 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
 		goto done;
 	}
@@ -2459,12 +2659,12 @@
 	if (flags & 0x01)
 		goto done;
 
-	chan->conf_state |= L2CAP_CONF_INPUT_DONE;
+	set_bit(CONF_INPUT_DONE, &chan->conf_state);
 
-	if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
 		set_default_fcs(chan);
 
-		sk->sk_state = BT_CONNECTED;
+		l2cap_state_change(chan, BT_CONNECTED);
 		chan->next_tx_seq = 0;
 		chan->expected_tx_seq = 0;
 		skb_queue_head_init(&chan->tx_q);
@@ -2506,9 +2706,9 @@
 
 	/* don't delete l2cap channel if sk is owned by user */
 	if (sock_owned_by_user(sk)) {
-		sk->sk_state = BT_DISCONN;
-		l2cap_sock_clear_timer(sk);
-		l2cap_sock_set_timer(sk, HZ / 5);
+		l2cap_state_change(chan, BT_DISCONN);
+		__clear_chan_timer(chan);
+		__set_chan_timer(chan, HZ / 5);
 		bh_unlock_sock(sk);
 		return 0;
 	}
@@ -2516,7 +2716,7 @@
 	l2cap_chan_del(chan, ECONNRESET);
 	bh_unlock_sock(sk);
 
-	l2cap_sock_kill(sk);
+	chan->ops->close(chan->data);
 	return 0;
 }
 
@@ -2540,9 +2740,9 @@
 
 	/* don't delete l2cap channel if sk is owned by user */
 	if (sock_owned_by_user(sk)) {
-		sk->sk_state = BT_DISCONN;
-		l2cap_sock_clear_timer(sk);
-		l2cap_sock_set_timer(sk, HZ / 5);
+		l2cap_state_change(chan, BT_DISCONN);
+		__clear_chan_timer(chan);
+		__set_chan_timer(chan, HZ / 5);
 		bh_unlock_sock(sk);
 		return 0;
 	}
@@ -2550,7 +2750,7 @@
 	l2cap_chan_del(chan, 0);
 	bh_unlock_sock(sk);
 
-	l2cap_sock_kill(sk);
+	chan->ops->close(chan->data);
 	return 0;
 }
 
@@ -2858,18 +3058,18 @@
 
 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
 		l2cap_send_sframe(chan, control);
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
 	}
 
-	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
+	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
 		l2cap_retransmit_frames(chan);
 
 	l2cap_ertm_send(chan);
 
-	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
 			chan->frames_sent == 0) {
 		control |= L2CAP_SUPER_RCV_READY;
 		l2cap_send_sframe(chan, control);
@@ -2925,17 +3125,13 @@
 
 	switch (control & L2CAP_CTRL_SAR) {
 	case L2CAP_SDU_UNSEGMENTED:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
 			goto drop;
 
-		err = sock_queue_rcv_skb(chan->sk, skb);
-		if (!err)
-			return err;
-
-		break;
+		return chan->ops->recv(chan->data, skb);
 
 	case L2CAP_SDU_START:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
 			goto drop;
 
 		chan->sdu_len = get_unaligned_le16(skb->data);
@@ -2954,12 +3150,12 @@
 
 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		set_bit(CONN_SAR_SDU, &chan->conn_state);
 		chan->partial_sdu_len = skb->len;
 		break;
 
 	case L2CAP_SDU_CONTINUE:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
 			goto disconnect;
 
 		if (!chan->sdu)
@@ -2974,39 +3170,34 @@
 		break;
 
 	case L2CAP_SDU_END:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
 			goto disconnect;
 
 		if (!chan->sdu)
 			goto disconnect;
 
-		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
-			chan->partial_sdu_len += skb->len;
+		chan->partial_sdu_len += skb->len;
 
-			if (chan->partial_sdu_len > chan->imtu)
-				goto drop;
+		if (chan->partial_sdu_len > chan->imtu)
+			goto drop;
 
-			if (chan->partial_sdu_len != chan->sdu_len)
-				goto drop;
+		if (chan->partial_sdu_len != chan->sdu_len)
+			goto drop;
 
-			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-		}
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
 		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
 		if (!_skb) {
-			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
 			return -ENOMEM;
 		}
 
-		err = sock_queue_rcv_skb(chan->sk, _skb);
+		err = chan->ops->recv(chan->data, _skb);
 		if (err < 0) {
 			kfree_skb(_skb);
-			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
 			return err;
 		}
 
-		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
-		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		clear_bit(CONN_SAR_SDU, &chan->conn_state);
 
 		kfree_skb(chan->sdu);
 		break;
@@ -3025,24 +3216,28 @@
 	return 0;
 }
 
-static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
+static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
 {
-	struct sk_buff *skb;
 	u16 control;
-	int err;
 
-	while ((skb = skb_dequeue(&chan->busy_q))) {
-		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
-		if (err < 0) {
-			skb_queue_head(&chan->busy_q, skb);
-			return -EBUSY;
-		}
+	BT_DBG("chan %p, Enter local busy", chan);
 
-		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
-	}
+	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
 
-	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
+	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= L2CAP_SUPER_RCV_NOT_READY;
+	l2cap_send_sframe(chan, control);
+
+	set_bit(CONN_RNR_SENT, &chan->conn_state);
+
+	__clear_ack_timer(chan);
+}
+
+static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
+{
+	u16 control;
+
+	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
 		goto done;
 
 	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
@@ -3050,103 +3245,26 @@
 	l2cap_send_sframe(chan, control);
 	chan->retry_count = 1;
 
-	del_timer(&chan->retrans_timer);
-	__mod_monitor_timer();
+	__clear_retrans_timer(chan);
+	__set_monitor_timer(chan);
 
-	chan->conn_state |= L2CAP_CONN_WAIT_F;
+	set_bit(CONN_WAIT_F, &chan->conn_state);
 
 done:
-	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
-	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
+	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+	clear_bit(CONN_RNR_SENT, &chan->conn_state);
 
 	BT_DBG("chan %p, Exit local busy", chan);
-
-	return 0;
 }
 
-static void l2cap_busy_work(struct work_struct *work)
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
 {
-	DECLARE_WAITQUEUE(wait, current);
-	struct l2cap_chan *chan =
-		container_of(work, struct l2cap_chan, busy_work);
-	struct sock *sk = chan->sk;
-	int n_tries = 0, timeo = HZ/5, err;
-	struct sk_buff *skb;
-
-	lock_sock(sk);
-
-	add_wait_queue(sk_sleep(sk), &wait);
-	while ((skb = skb_peek(&chan->busy_q))) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
-			err = -EBUSY;
-			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
-			break;
-		}
-
-		if (!timeo)
-			timeo = HZ/5;
-
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeo);
-			break;
-		}
-
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
-
-		err = sock_error(sk);
-		if (err)
-			break;
-
-		if (l2cap_try_push_rx_skb(chan) == 0)
-			break;
+	if (chan->mode == L2CAP_MODE_ERTM) {
+		if (busy)
+			l2cap_ertm_enter_local_busy(chan);
+		else
+			l2cap_ertm_exit_local_busy(chan);
 	}
-
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk_sleep(sk), &wait);
-
-	release_sock(sk);
-}
-
-static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
-{
-	int sctrl, err;
-
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
-		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-		__skb_queue_tail(&chan->busy_q, skb);
-		return l2cap_try_push_rx_skb(chan);
-
-
-	}
-
-	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
-	if (err >= 0) {
-		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
-		return err;
-	}
-
-	/* Busy Condition */
-	BT_DBG("chan %p, Enter local busy", chan);
-
-	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
-	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-	__skb_queue_tail(&chan->busy_q, skb);
-
-	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
-	l2cap_send_sframe(chan, sctrl);
-
-	chan->conn_state |= L2CAP_CONN_RNR_SENT;
-
-	del_timer(&chan->ack_timer);
-
-	queue_work(_busy_wq, &chan->busy_work);
-
-	return err;
 }
 
 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
@@ -3161,19 +3279,19 @@
 
 	switch (control & L2CAP_CTRL_SAR) {
 	case L2CAP_SDU_UNSEGMENTED:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
 			kfree_skb(chan->sdu);
 			break;
 		}
 
-		err = sock_queue_rcv_skb(chan->sk, skb);
+		err = chan->ops->recv(chan->data, skb);
 		if (!err)
 			return 0;
 
 		break;
 
 	case L2CAP_SDU_START:
-		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
 			kfree_skb(chan->sdu);
 			break;
 		}
@@ -3194,13 +3312,13 @@
 
 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		set_bit(CONN_SAR_SDU, &chan->conn_state);
 		chan->partial_sdu_len = skb->len;
 		err = 0;
 		break;
 
 	case L2CAP_SDU_CONTINUE:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
 			break;
 
 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
@@ -3214,12 +3332,12 @@
 		break;
 
 	case L2CAP_SDU_END:
-		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
 			break;
 
 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		clear_bit(CONN_SAR_SDU, &chan->conn_state);
 		chan->partial_sdu_len += skb->len;
 
 		if (chan->partial_sdu_len > chan->imtu)
@@ -3227,7 +3345,7 @@
 
 		if (chan->partial_sdu_len == chan->sdu_len) {
 			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
-			err = sock_queue_rcv_skb(chan->sk, _skb);
+			err = chan->ops->recv(chan->data, _skb);
 			if (err < 0)
 				kfree_skb(_skb);
 		}
@@ -3247,13 +3365,22 @@
 	struct sk_buff *skb;
 	u16 control;
 
-	while ((skb = skb_peek(&chan->srej_q))) {
+	while ((skb = skb_peek(&chan->srej_q)) &&
+			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+		int err;
+
 		if (bt_cb(skb)->tx_seq != tx_seq)
 			break;
 
 		skb = skb_dequeue(&chan->srej_q);
 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		l2cap_ertm_reassembly_sdu(chan, skb, control);
+		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
+
+		if (err < 0) {
+			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+			break;
+		}
+
 		chan->buffer_seq_srej =
 			(chan->buffer_seq_srej + 1) % 64;
 		tx_seq = (tx_seq + 1) % 64;
@@ -3310,19 +3437,16 @@
 							tx_seq, rx_control);
 
 	if (L2CAP_CTRL_FINAL & rx_control &&
-			chan->conn_state & L2CAP_CONN_WAIT_F) {
-		del_timer(&chan->monitor_timer);
+			test_bit(CONN_WAIT_F, &chan->conn_state)) {
+		__clear_monitor_timer(chan);
 		if (chan->unacked_frames > 0)
-			__mod_retrans_timer();
-		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
+			__set_retrans_timer(chan);
+		clear_bit(CONN_WAIT_F, &chan->conn_state);
 	}
 
 	chan->expected_ack_seq = req_seq;
 	l2cap_drop_acked_frames(chan);
 
-	if (tx_seq == chan->expected_tx_seq)
-		goto expected;
-
 	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
 	if (tx_seq_offset < 0)
 		tx_seq_offset += 64;
@@ -3333,10 +3457,13 @@
 		goto drop;
 	}
 
-	if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
 		goto drop;
 
-	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (tx_seq == chan->expected_tx_seq)
+		goto expected;
+
+	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
 		struct srej_list *first;
 
 		first = list_first_entry(&chan->srej_l,
@@ -3350,7 +3477,7 @@
 
 			if (list_empty(&chan->srej_l)) {
 				chan->buffer_seq = chan->buffer_seq_srej;
-				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
 				l2cap_send_ack(chan);
 				BT_DBG("chan %p, Exit SREJ_SENT", chan);
 			}
@@ -3379,7 +3506,7 @@
 		if (tx_seq_offset < expected_tx_seq_offset)
 			goto drop;
 
-		chan->conn_state |= L2CAP_CONN_SREJ_SENT;
+		set_bit(CONN_SREJ_SENT, &chan->conn_state);
 
 		BT_DBG("chan %p, Enter SREJ", chan);
 
@@ -3387,39 +3514,39 @@
 		chan->buffer_seq_srej = chan->buffer_seq;
 
 		__skb_queue_head_init(&chan->srej_q);
-		__skb_queue_head_init(&chan->busy_q);
 		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
 
-		chan->conn_state |= L2CAP_CONN_SEND_PBIT;
+		set_bit(CONN_SEND_PBIT, &chan->conn_state);
 
 		l2cap_send_srejframe(chan, tx_seq);
 
-		del_timer(&chan->ack_timer);
+		__clear_ack_timer(chan);
 	}
 	return 0;
 
 expected:
 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
 
-	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
 		bt_cb(skb)->tx_seq = tx_seq;
 		bt_cb(skb)->sar = sar;
 		__skb_queue_tail(&chan->srej_q, skb);
 		return 0;
 	}
 
-	err = l2cap_push_rx_skb(chan, skb, rx_control);
-	if (err < 0)
-		return 0;
+	err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
+	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
+	if (err < 0) {
+		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+		return err;
+	}
 
 	if (rx_control & L2CAP_CTRL_FINAL) {
-		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
-			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
-		else
+		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
 			l2cap_retransmit_frames(chan);
 	}
 
-	__mod_ack_timer();
+	__set_ack_timer(chan);
 
 	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
 	if (chan->num_acked == num_to_ack - 1)
@@ -3441,33 +3568,31 @@
 	l2cap_drop_acked_frames(chan);
 
 	if (rx_control & L2CAP_CTRL_POLL) {
-		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
-		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
-			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+		set_bit(CONN_SEND_FBIT, &chan->conn_state);
+		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
+			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
 					(chan->unacked_frames > 0))
-				__mod_retrans_timer();
+				__set_retrans_timer(chan);
 
-			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 			l2cap_send_srejtail(chan);
 		} else {
 			l2cap_send_i_or_rr_or_rnr(chan);
 		}
 
 	} else if (rx_control & L2CAP_CTRL_FINAL) {
-		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
-		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
-			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
-		else
+		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
 			l2cap_retransmit_frames(chan);
 
 	} else {
-		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
 				(chan->unacked_frames > 0))
-			__mod_retrans_timer();
+			__set_retrans_timer(chan);
 
-		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
+		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
 			l2cap_send_ack(chan);
 		else
 			l2cap_ertm_send(chan);
@@ -3480,21 +3605,19 @@
 
 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
 
-	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
 	chan->expected_ack_seq = tx_seq;
 	l2cap_drop_acked_frames(chan);
 
 	if (rx_control & L2CAP_CTRL_FINAL) {
-		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
-			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
-		else
+		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
 			l2cap_retransmit_frames(chan);
 	} else {
 		l2cap_retransmit_frames(chan);
 
-		if (chan->conn_state & L2CAP_CONN_WAIT_F)
-			chan->conn_state |= L2CAP_CONN_REJ_ACT;
+		if (test_bit(CONN_WAIT_F, &chan->conn_state))
+			set_bit(CONN_REJ_ACT, &chan->conn_state);
 	}
 }
 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
@@ -3503,32 +3626,32 @@
 
 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
 
-	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
 	if (rx_control & L2CAP_CTRL_POLL) {
 		chan->expected_ack_seq = tx_seq;
 		l2cap_drop_acked_frames(chan);
 
-		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
+		set_bit(CONN_SEND_FBIT, &chan->conn_state);
 		l2cap_retransmit_one_frame(chan, tx_seq);
 
 		l2cap_ertm_send(chan);
 
-		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
+		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
 			chan->srej_save_reqseq = tx_seq;
-			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
+			set_bit(CONN_SREJ_ACT, &chan->conn_state);
 		}
 	} else if (rx_control & L2CAP_CTRL_FINAL) {
-		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
+		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
 				chan->srej_save_reqseq == tx_seq)
-			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
 		else
 			l2cap_retransmit_one_frame(chan, tx_seq);
 	} else {
 		l2cap_retransmit_one_frame(chan, tx_seq);
-		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
+		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
 			chan->srej_save_reqseq = tx_seq;
-			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
+			set_bit(CONN_SREJ_ACT, &chan->conn_state);
 		}
 	}
 }
@@ -3539,15 +3662,15 @@
 
 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
 
-	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 	chan->expected_ack_seq = tx_seq;
 	l2cap_drop_acked_frames(chan);
 
 	if (rx_control & L2CAP_CTRL_POLL)
-		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
+		set_bit(CONN_SEND_FBIT, &chan->conn_state);
 
-	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
-		del_timer(&chan->retrans_timer);
+	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
+		__clear_retrans_timer(chan);
 		if (rx_control & L2CAP_CTRL_POLL)
 			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
 		return;
@@ -3564,11 +3687,11 @@
 	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
 
 	if (L2CAP_CTRL_FINAL & rx_control &&
-			chan->conn_state & L2CAP_CONN_WAIT_F) {
-		del_timer(&chan->monitor_timer);
+			test_bit(CONN_WAIT_F, &chan->conn_state)) {
+		__clear_monitor_timer(chan);
 		if (chan->unacked_frames > 0)
-			__mod_retrans_timer();
-		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
+			__set_retrans_timer(chan);
+		clear_bit(CONN_WAIT_F, &chan->conn_state);
 	}
 
 	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
@@ -3667,7 +3790,6 @@
 {
 	struct l2cap_chan *chan;
 	struct sock *sk = NULL;
-	struct l2cap_pinfo *pi;
 	u16 control;
 	u8 tx_seq;
 	int len;
@@ -3679,11 +3801,10 @@
 	}
 
 	sk = chan->sk;
-	pi = l2cap_pi(sk);
 
 	BT_DBG("chan %p, len %d", chan, skb->len);
 
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
 		goto drop;
 
 	switch (chan->mode) {
@@ -3696,7 +3817,7 @@
 		if (chan->imtu < skb->len)
 			goto drop;
 
-		if (!sock_queue_rcv_skb(sk, skb))
+		if (!chan->ops->recv(chan->data, skb))
 			goto done;
 		break;
 
@@ -3768,13 +3889,13 @@
 
 	BT_DBG("sk %p, len %d", sk, skb->len);
 
-	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
 		goto drop;
 
-	if (l2cap_pi(sk)->chan->imtu < skb->len)
+	if (chan->imtu < skb->len)
 		goto drop;
 
-	if (!sock_queue_rcv_skb(sk, skb))
+	if (!chan->ops->recv(chan->data, skb))
 		goto done;
 
 drop:
@@ -3801,13 +3922,13 @@
 
 	BT_DBG("sk %p, len %d", sk, skb->len);
 
-	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
 		goto drop;
 
-	if (l2cap_pi(sk)->chan->imtu < skb->len)
+	if (chan->imtu < skb->len)
 		goto drop;
 
-	if (!sock_queue_rcv_skb(sk, skb))
+	if (!chan->ops->recv(chan->data, skb))
 		goto done;
 
 drop:
@@ -3852,6 +3973,11 @@
 		l2cap_att_channel(conn, cid, skb);
 		break;
 
+	case L2CAP_CID_SMP:
+		if (smp_sig_channel(conn, skb))
+			l2cap_conn_del(conn->hcon, EACCES);
+		break;
+
 	default:
 		l2cap_data_channel(conn, cid, skb);
 		break;
@@ -3875,7 +4001,7 @@
 	list_for_each_entry(c, &chan_list, global_l) {
 		struct sock *sk = c->sk;
 
-		if (sk->sk_state != BT_LISTEN)
+		if (c->state != BT_LISTEN)
 			continue;
 
 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
@@ -3908,7 +4034,7 @@
 		if (conn)
 			l2cap_conn_ready(conn);
 	} else
-		l2cap_conn_del(hcon, bt_err(status));
+		l2cap_conn_del(hcon, bt_to_errno(status));
 
 	return 0;
 }
@@ -3919,7 +4045,7 @@
 
 	BT_DBG("hcon %p", hcon);
 
-	if (hcon->type != ACL_LINK || !conn)
+	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
 		return 0x13;
 
 	return conn->disc_reason;
@@ -3932,27 +4058,25 @@
 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
 		return -EINVAL;
 
-	l2cap_conn_del(hcon, bt_err(reason));
+	l2cap_conn_del(hcon, bt_to_errno(reason));
 
 	return 0;
 }
 
 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
 {
-	struct sock *sk = chan->sk;
-
-	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
+	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
 		return;
 
 	if (encrypt == 0x00) {
 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
-			l2cap_sock_clear_timer(sk);
-			l2cap_sock_set_timer(sk, HZ * 5);
+			__clear_chan_timer(chan);
+			__set_chan_timer(chan, HZ * 5);
 		} else if (chan->sec_level == BT_SECURITY_HIGH)
-			__l2cap_sock_close(sk, ECONNREFUSED);
+			l2cap_chan_close(chan, ECONNREFUSED);
 	} else {
 		if (chan->sec_level == BT_SECURITY_MEDIUM)
-			l2cap_sock_clear_timer(sk);
+			__clear_chan_timer(chan);
 	}
 }
 
@@ -3973,34 +4097,48 @@
 
 		bh_lock_sock(sk);
 
-		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
+		BT_DBG("chan->scid %d", chan->scid);
+
+		if (chan->scid == L2CAP_CID_LE_DATA) {
+			if (!status && encrypt) {
+				chan->sec_level = hcon->sec_level;
+				del_timer(&conn->security_timer);
+				l2cap_chan_ready(sk);
+				smp_distribute_keys(conn, 0);
+			}
+
 			bh_unlock_sock(sk);
 			continue;
 		}
 
-		if (!status && (sk->sk_state == BT_CONNECTED ||
-						sk->sk_state == BT_CONFIG)) {
+		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
+			bh_unlock_sock(sk);
+			continue;
+		}
+
+		if (!status && (chan->state == BT_CONNECTED ||
+						chan->state == BT_CONFIG)) {
 			l2cap_check_encryption(chan, encrypt);
 			bh_unlock_sock(sk);
 			continue;
 		}
 
-		if (sk->sk_state == BT_CONNECT) {
+		if (chan->state == BT_CONNECT) {
 			if (!status) {
 				struct l2cap_conn_req req;
 				req.scid = cpu_to_le16(chan->scid);
 				req.psm  = chan->psm;
 
 				chan->ident = l2cap_get_ident(conn);
-				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+				set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 
 				l2cap_send_cmd(conn, chan->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
 			} else {
-				l2cap_sock_clear_timer(sk);
-				l2cap_sock_set_timer(sk, HZ / 10);
+				__clear_chan_timer(chan);
+				__set_chan_timer(chan, HZ / 10);
 			}
-		} else if (sk->sk_state == BT_CONNECT2) {
+		} else if (chan->state == BT_CONNECT2) {
 			struct l2cap_conn_rsp rsp;
 			__u16 res, stat;
 
@@ -4011,13 +4149,13 @@
 					stat = L2CAP_CS_AUTHOR_PEND;
 					parent->sk_data_ready(parent, 0);
 				} else {
-					sk->sk_state = BT_CONFIG;
+					l2cap_state_change(chan, BT_CONFIG);
 					res = L2CAP_CR_SUCCESS;
 					stat = L2CAP_CS_NO_INFO;
 				}
 			} else {
-				sk->sk_state = BT_DISCONN;
-				l2cap_sock_set_timer(sk, HZ / 10);
+				l2cap_state_change(chan, BT_DISCONN);
+				__set_chan_timer(chan, HZ / 10);
 				res = L2CAP_CR_SEC_BLOCK;
 				stat = L2CAP_CS_NO_INFO;
 			}
@@ -4161,7 +4299,7 @@
 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
 					batostr(&bt_sk(sk)->src),
 					batostr(&bt_sk(sk)->dst),
-					sk->sk_state, __le16_to_cpu(c->psm),
+					c->state, __le16_to_cpu(c->psm),
 					c->scid, c->dcid, c->imtu, c->omtu,
 					c->sec_level, c->mode);
 	}
@@ -4204,12 +4342,6 @@
 	if (err < 0)
 		return err;
 
-	_busy_wq = create_singlethread_workqueue("l2cap");
-	if (!_busy_wq) {
-		err = -ENOMEM;
-		goto error;
-	}
-
 	err = hci_register_proto(&l2cap_hci_proto);
 	if (err < 0) {
 		BT_ERR("L2CAP protocol registration failed");
@@ -4227,7 +4359,6 @@
 	return 0;
 
 error:
-	destroy_workqueue(_busy_wq);
 	l2cap_cleanup_sockets();
 	return err;
 }
@@ -4236,9 +4367,6 @@
 {
 	debugfs_remove(l2cap_debugfs);
 
-	flush_workqueue(_busy_wq);
-	destroy_workqueue(_busy_wq);
-
 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
 		BT_ERR("L2CAP protocol unregistration failed");
 
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 8248303..5c36b3e 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -29,54 +29,11 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/smp.h>
 
 static const struct proto_ops l2cap_sock_ops;
-
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_timeout(unsigned long arg)
-{
-	struct sock *sk = (struct sock *) arg;
-	int reason;
-
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
-
-	bh_lock_sock(sk);
-
-	if (sock_owned_by_user(sk)) {
-		/* sk is owned by user. Try again later */
-		l2cap_sock_set_timer(sk, HZ / 5);
-		bh_unlock_sock(sk);
-		sock_put(sk);
-		return;
-	}
-
-	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
-		reason = ECONNREFUSED;
-	else if (sk->sk_state == BT_CONNECT &&
-			l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
-		reason = ECONNREFUSED;
-	else
-		reason = ETIMEDOUT;
-
-	__l2cap_sock_close(sk, reason);
-
-	bh_unlock_sock(sk);
-
-	l2cap_sock_kill(sk);
-	sock_put(sk);
-}
-
-void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
-	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
-	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-void l2cap_sock_clear_timer(struct sock *sk)
-{
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
-	sk_stop_timer(sk, &sk->sk_timer);
-}
+static void l2cap_sock_init(struct sock *sk, struct sock *parent);
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
 
 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
@@ -133,6 +90,8 @@
 		chan->sec_level = BT_SECURITY_SDP;
 
 	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+
+	chan->state = BT_BOUND;
 	sk->sk_state = BT_BOUND;
 
 done:
@@ -162,7 +121,7 @@
 
 	lock_sock(sk);
 
-	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
 			&& !(la.l2_psm || la.l2_cid)) {
 		err = -EINVAL;
 		goto done;
@@ -204,8 +163,8 @@
 	}
 
 	/* PSM must be odd and lsb of upper byte must be 0 */
-	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
-				sk->sk_type != SOCK_RAW && !la.l2_cid) {
+	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
 		err = -EINVAL;
 		goto done;
 	}
@@ -258,6 +217,8 @@
 
 	sk->sk_max_ack_backlog = backlog;
 	sk->sk_ack_backlog = 0;
+
+	chan->state = BT_LISTEN;
 	sk->sk_state = BT_LISTEN;
 
 done:
@@ -437,6 +398,7 @@
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct bt_security sec;
+	struct bt_power pwr;
 	int len, err = 0;
 
 	BT_DBG("sk %p", sk);
@@ -454,14 +416,18 @@
 
 	switch (optname) {
 	case BT_SECURITY:
-		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-				&& sk->sk_type != SOCK_RAW) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
 			err = -EINVAL;
 			break;
 		}
 
+		memset(&sec, 0, sizeof(sec));
 		sec.level = chan->sec_level;
 
+		if (sk->sk_state == BT_CONNECTED)
+			sec.key_size = chan->conn->hcon->enc_key_size;
+
 		len = min_t(unsigned int, len, sizeof(sec));
 		if (copy_to_user(optval, (char *) &sec, len))
 			err = -EFAULT;
@@ -485,6 +451,21 @@
 
 		break;
 
+	case BT_POWER:
+		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+				&& sk->sk_type != SOCK_RAW) {
+			err = -EINVAL;
+			break;
+		}
+
+		pwr.force_active = chan->force_active;
+
+		len = min_t(unsigned int, len, sizeof(pwr));
+		if (copy_to_user(optval, (char *) &pwr, len))
+			err = -EFAULT;
+
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -535,7 +516,7 @@
 		chan->mode = opts.mode;
 		switch (chan->mode) {
 		case L2CAP_MODE_BASIC:
-			chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+			clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
 			break;
 		case L2CAP_MODE_ERTM:
 		case L2CAP_MODE_STREAMING:
@@ -585,6 +566,8 @@
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct bt_security sec;
+	struct bt_power pwr;
+	struct l2cap_conn *conn;
 	int len, err = 0;
 	u32 opt;
 
@@ -600,8 +583,8 @@
 
 	switch (optname) {
 	case BT_SECURITY:
-		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-				&& sk->sk_type != SOCK_RAW) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
 			err = -EINVAL;
 			break;
 		}
@@ -621,6 +604,20 @@
 		}
 
 		chan->sec_level = sec.level;
+
+		conn = chan->conn;
+		if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+			if (!conn->hcon->out) {
+				err = -EINVAL;
+				break;
+			}
+
+			if (smp_conn_security(conn, sec.level))
+				break;
+
+			err = 0;
+			sk->sk_state = BT_CONFIG;
+		}
 		break;
 
 	case BT_DEFER_SETUP:
@@ -661,6 +658,23 @@
 		chan->flushable = opt;
 		break;
 
+	case BT_POWER:
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
+			err = -EINVAL;
+			break;
+		}
+
+		pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+		len = min_t(unsigned int, sizeof(pwr), optlen);
+		if (copy_from_user((char *) &pwr, optval, len)) {
+			err = -EFAULT;
+			break;
+		}
+		chan->force_active = pwr.force_active;
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -674,8 +688,6 @@
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	struct sk_buff *skb;
-	u16 control;
 	int err;
 
 	BT_DBG("sock %p, sk %p", sock, sk);
@@ -690,87 +702,12 @@
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_CONNECTED) {
-		err = -ENOTCONN;
-		goto done;
+		release_sock(sk);
+		return -ENOTCONN;
 	}
 
-	/* Connectionless channel */
-	if (sk->sk_type == SOCK_DGRAM) {
-		skb = l2cap_create_connless_pdu(chan, msg, len);
-		if (IS_ERR(skb)) {
-			err = PTR_ERR(skb);
-		} else {
-			l2cap_do_send(chan, skb);
-			err = len;
-		}
-		goto done;
-	}
+	err = l2cap_chan_send(chan, msg, len);
 
-	switch (chan->mode) {
-	case L2CAP_MODE_BASIC:
-		/* Check outgoing MTU */
-		if (len > chan->omtu) {
-			err = -EMSGSIZE;
-			goto done;
-		}
-
-		/* Create a basic PDU */
-		skb = l2cap_create_basic_pdu(chan, msg, len);
-		if (IS_ERR(skb)) {
-			err = PTR_ERR(skb);
-			goto done;
-		}
-
-		l2cap_do_send(chan, skb);
-		err = len;
-		break;
-
-	case L2CAP_MODE_ERTM:
-	case L2CAP_MODE_STREAMING:
-		/* Entire SDU fits into one PDU */
-		if (len <= chan->remote_mps) {
-			control = L2CAP_SDU_UNSEGMENTED;
-			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
-									0);
-			if (IS_ERR(skb)) {
-				err = PTR_ERR(skb);
-				goto done;
-			}
-			__skb_queue_tail(&chan->tx_q, skb);
-
-			if (chan->tx_send_head == NULL)
-				chan->tx_send_head = skb;
-
-		} else {
-		/* Segment SDU into multiples PDUs */
-			err = l2cap_sar_segment_sdu(chan, msg, len);
-			if (err < 0)
-				goto done;
-		}
-
-		if (chan->mode == L2CAP_MODE_STREAMING) {
-			l2cap_streaming_send(chan);
-			err = len;
-			break;
-		}
-
-		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
-			err = len;
-			break;
-		}
-		err = l2cap_ertm_send(chan);
-
-		if (err >= 0)
-			err = len;
-		break;
-
-	default:
-		BT_DBG("bad state %1.1x", chan->mode);
-		err = -EBADFD;
-	}
-
-done:
 	release_sock(sk);
 	return err;
 }
@@ -778,13 +715,15 @@
 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	int err;
 
 	lock_sock(sk);
 
 	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
 		sk->sk_state = BT_CONFIG;
 
-		__l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
+		__l2cap_connect_rsp_defer(pi->chan);
 		release_sock(sk);
 		return 0;
 	}
@@ -792,15 +731,43 @@
 	release_sock(sk);
 
 	if (sock->type == SOCK_STREAM)
-		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+		err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+	else
+		err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
 
-	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+	if (pi->chan->mode != L2CAP_MODE_ERTM)
+		return err;
+
+	/* Attempt to put pending rx data in the socket buffer */
+
+	lock_sock(sk);
+
+	if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+		goto done;
+
+	if (pi->rx_busy_skb) {
+		if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+			pi->rx_busy_skb = NULL;
+		else
+			goto done;
+	}
+
+	/* Restore data flow when half of the receive buffer is
+	 * available.  This avoids resending large numbers of
+	 * frames.
+	 */
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+		l2cap_chan_busy(pi->chan, 0);
+
+done:
+	release_sock(sk);
+	return err;
 }
 
 /* Kill socket (only if zapped and orphan)
  * Must be called on unlocked socket.
  */
-void l2cap_sock_kill(struct sock *sk)
+static void l2cap_sock_kill(struct sock *sk)
 {
 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
 		return;
@@ -814,87 +781,6 @@
 	sock_put(sk);
 }
 
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
-{
-	l2cap_sock_clear_timer(sk);
-	lock_sock(sk);
-	__l2cap_sock_close(sk, ECONNRESET);
-	release_sock(sk);
-	l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
-{
-	struct sock *sk;
-
-	BT_DBG("parent %p", parent);
-
-	/* Close not yet accepted channels */
-	while ((sk = bt_accept_dequeue(parent, NULL)))
-		l2cap_sock_close(sk);
-
-	parent->sk_state = BT_CLOSED;
-	sock_set_flag(parent, SOCK_ZAPPED);
-}
-
-void __l2cap_sock_close(struct sock *sk, int reason)
-{
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	struct l2cap_conn *conn = chan->conn;
-
-	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
-	switch (sk->sk_state) {
-	case BT_LISTEN:
-		l2cap_sock_cleanup_listen(sk);
-		break;
-
-	case BT_CONNECTED:
-	case BT_CONFIG:
-		if ((sk->sk_type == SOCK_SEQPACKET ||
-					sk->sk_type == SOCK_STREAM) &&
-					conn->hcon->type == ACL_LINK) {
-			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-			l2cap_send_disconn_req(conn, chan, reason);
-		} else
-			l2cap_chan_del(chan, reason);
-		break;
-
-	case BT_CONNECT2:
-		if ((sk->sk_type == SOCK_SEQPACKET ||
-					sk->sk_type == SOCK_STREAM) &&
-					conn->hcon->type == ACL_LINK) {
-			struct l2cap_conn_rsp rsp;
-			__u16 result;
-
-			if (bt_sk(sk)->defer_setup)
-				result = L2CAP_CR_SEC_BLOCK;
-			else
-				result = L2CAP_CR_BAD_PSM;
-
-			rsp.scid   = cpu_to_le16(chan->dcid);
-			rsp.dcid   = cpu_to_le16(chan->scid);
-			rsp.result = cpu_to_le16(result);
-			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
-							sizeof(rsp), &rsp);
-		}
-
-		l2cap_chan_del(chan, reason);
-		break;
-
-	case BT_CONNECT:
-	case BT_DISCONN:
-		l2cap_chan_del(chan, reason);
-		break;
-
-	default:
-		sock_set_flag(sk, SOCK_ZAPPED);
-		break;
-	}
-}
-
 static int l2cap_sock_shutdown(struct socket *sock, int how)
 {
 	struct sock *sk = sock->sk;
@@ -912,8 +798,7 @@
 			err = __l2cap_wait_ack(sk);
 
 		sk->sk_shutdown = SHUTDOWN_MASK;
-		l2cap_sock_clear_timer(sk);
-		__l2cap_sock_close(sk, 0);
+		l2cap_chan_close(chan, 0);
 
 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 			err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -944,15 +829,85 @@
 	return err;
 }
 
+static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
+{
+	struct sock *sk, *parent = data;
+
+	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+								GFP_ATOMIC);
+	if (!sk)
+		return NULL;
+
+	l2cap_sock_init(sk, parent);
+
+	return l2cap_pi(sk)->chan;
+}
+
+static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
+{
+	int err;
+	struct sock *sk = data;
+	struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+	if (pi->rx_busy_skb)
+		return -ENOMEM;
+
+	err = sock_queue_rcv_skb(sk, skb);
+
+	/* For ERTM, handle one skb that doesn't fit into the recv
+	 * buffer.  This is important to do because the data frames
+	 * have already been acked, so the skb cannot be discarded.
+	 *
+	 * Notify the l2cap core that the buffer is full, so the
+	 * LOCAL_BUSY state is entered and no more frames are
+	 * acked and reassembled until there is buffer space
+	 * available.
+	 */
+	if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
+		pi->rx_busy_skb = skb;
+		l2cap_chan_busy(pi->chan, 1);
+		err = 0;
+	}
+
+	return err;
+}
+
+static void l2cap_sock_close_cb(void *data)
+{
+	struct sock *sk = data;
+
+	l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_state_change_cb(void *data, int state)
+{
+	struct sock *sk = data;
+
+	sk->sk_state = state;
+}
+
+static struct l2cap_ops l2cap_chan_ops = {
+	.name		= "L2CAP Socket Interface",
+	.new_connection	= l2cap_sock_new_connection_cb,
+	.recv		= l2cap_sock_recv_cb,
+	.close		= l2cap_sock_close_cb,
+	.state_change	= l2cap_sock_state_change_cb,
+};
+
 static void l2cap_sock_destruct(struct sock *sk)
 {
 	BT_DBG("sk %p", sk);
 
+	if (l2cap_pi(sk)->rx_busy_skb) {
+		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+		l2cap_pi(sk)->rx_busy_skb = NULL;
+	}
+
 	skb_queue_purge(&sk->sk_receive_queue);
 	skb_queue_purge(&sk->sk_write_queue);
 }
 
-void l2cap_sock_init(struct sock *sk, struct sock *parent)
+static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 {
 	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct l2cap_chan *chan = pi->chan;
@@ -965,6 +920,7 @@
 		sk->sk_type = parent->sk_type;
 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
 
+		chan->chan_type = pchan->chan_type;
 		chan->imtu = pchan->imtu;
 		chan->omtu = pchan->omtu;
 		chan->conf_state = pchan->conf_state;
@@ -976,12 +932,27 @@
 		chan->role_switch = pchan->role_switch;
 		chan->force_reliable = pchan->force_reliable;
 		chan->flushable = pchan->flushable;
+		chan->force_active = pchan->force_active;
 	} else {
+
+		switch (sk->sk_type) {
+		case SOCK_RAW:
+			chan->chan_type = L2CAP_CHAN_RAW;
+			break;
+		case SOCK_DGRAM:
+			chan->chan_type = L2CAP_CHAN_CONN_LESS;
+			break;
+		case SOCK_SEQPACKET:
+		case SOCK_STREAM:
+			chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
+			break;
+		}
+
 		chan->imtu = L2CAP_DEFAULT_MTU;
 		chan->omtu = 0;
 		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
 			chan->mode = L2CAP_MODE_ERTM;
-			chan->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+			set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
 		} else {
 			chan->mode = L2CAP_MODE_BASIC;
 		}
@@ -992,10 +963,15 @@
 		chan->role_switch = 0;
 		chan->force_reliable = 0;
 		chan->flushable = BT_FLUSHABLE_OFF;
+		chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
+
 	}
 
 	/* Default config options */
 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+	chan->data = sk;
+	chan->ops = &l2cap_chan_ops;
 }
 
 static struct proto l2cap_proto = {
@@ -1004,9 +980,10 @@
 	.obj_size	= sizeof(struct l2cap_pinfo)
 };
 
-struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
 {
 	struct sock *sk;
+	struct l2cap_chan *chan;
 
 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
 	if (!sk)
@@ -1023,7 +1000,13 @@
 	sk->sk_protocol = proto;
 	sk->sk_state = BT_OPEN;
 
-	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
+	chan = l2cap_chan_create(sk);
+	if (!chan) {
+		l2cap_sock_kill(sk);
+		return NULL;
+	}
+
+	l2cap_pi(sk)->chan = chan;
 
 	return sk;
 }
@@ -1032,7 +1015,6 @@
 			     int kern)
 {
 	struct sock *sk;
-	struct l2cap_chan *chan;
 
 	BT_DBG("sock %p", sock);
 
@@ -1051,14 +1033,6 @@
 	if (!sk)
 		return -ENOMEM;
 
-	chan = l2cap_chan_create(sk);
-	if (!chan) {
-		l2cap_sock_kill(sk);
-		return -ENOMEM;
-	}
-
-	l2cap_pi(sk)->chan = chan;
-
 	l2cap_sock_init(sk, NULL);
 	return 0;
 }
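
The BT_POWER option added above is reachable from user space through the usual SOL_BLUETOOTH setsockopt() path. A minimal sketch, assuming the updated Bluetooth headers that accompany this series export BT_POWER, struct bt_power and the BT_POWER_FORCE_ACTIVE_* constants:

/* Sketch only; BT_POWER, struct bt_power and BT_POWER_FORCE_ACTIVE_OFF are
 * assumed to come from the updated <bluetooth/bluetooth.h> in this series. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int l2cap_allow_sniff(int sk)
{
	struct bt_power pwr;

	memset(&pwr, 0, sizeof(pwr));
	pwr.force_active = BT_POWER_FORCE_ACTIVE_OFF;

	/* Only connection-oriented and raw channels accept this option;
	 * the kernel returns -EINVAL for anything else. */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_POWER, &pwr, sizeof(pwr)) < 0) {
		perror("setsockopt(BT_POWER)");
		return -1;
	}
	return 0;
}
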
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b826d1b..86a6bed 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -59,7 +59,7 @@
 EXPORT_SYMBOL(batostr);
 
 /* Bluetooth error codes to Unix errno mapping */
-int bt_err(__u16 code)
+int bt_to_errno(__u16 code)
 {
 	switch (code) {
 	case 0:
@@ -149,4 +149,23 @@
 		return ENOSYS;
 	}
 }
-EXPORT_SYMBOL(bt_err);
+EXPORT_SYMBOL(bt_to_errno);
+
+int bt_printk(const char *level, const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	r = printk("%sBluetooth: %pV\n", level, &vaf);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(bt_printk);
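
bt_to_errno() is a pure rename; bt_printk() is new and centralizes the "Bluetooth: " prefix and the trailing newline, so callers pass a bare format string. The BT_INFO/BT_ERR macros in bluetooth.h are presumably rerouted through it by a companion header change not shown here; a direct call looks roughly like this sketch:

/* Sketch only; the usual entry points are the BT_INFO/BT_ERR wrappers. */
#include <linux/kernel.h>
#include <net/bluetooth/bluetooth.h>

static void report_link(unsigned int handle, int status)
{
	if (status)
		bt_printk(KERN_ERR, "link 0x%4.4x failed (status %d)",
							handle, status);
	else
		bt_printk(KERN_INFO, "link 0x%4.4x established", handle);
}
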
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index dae382c..9832721 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -41,7 +41,7 @@
 	void *user_data;
 };
 
-LIST_HEAD(cmd_list);
+static LIST_HEAD(cmd_list);
 
 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
@@ -179,7 +179,7 @@
 
 	hci_del_off_timer(hdev);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	set_bit(HCI_MGMT, &hdev->flags);
 
@@ -208,7 +208,7 @@
 
 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
 
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -316,7 +316,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	up = test_bit(HCI_UP, &hdev->flags);
 	if ((cp->val && up) || (!cp->val && !up)) {
@@ -343,7 +343,7 @@
 	err = 0;
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -368,7 +368,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -403,7 +403,7 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -429,7 +429,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -463,7 +463,7 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -522,7 +522,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (cp->val)
 		set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -538,7 +538,7 @@
 	err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -739,7 +739,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
 	if (!uuid) {
@@ -763,7 +763,7 @@
 	err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -788,7 +788,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
 		err = hci_uuids_clear(hdev);
@@ -823,7 +823,7 @@
 	err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
 
 unlock:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -847,7 +847,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	hdev->major_class = cp->major;
 	hdev->minor_class = cp->minor;
@@ -857,7 +857,7 @@
 	if (err == 0)
 		err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
 
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -879,7 +879,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	BT_DBG("hci%u enable %d", index, cp->enable);
 
@@ -897,7 +897,7 @@
 		err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
 									0);
 
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -908,7 +908,7 @@
 	struct hci_dev *hdev;
 	struct mgmt_cp_load_keys *cp;
 	u16 key_count, expected_len;
-	int i;
+	int i, err;
 
 	cp = (void *) data;
 
@@ -918,9 +918,9 @@
 	key_count = get_unaligned_le16(&cp->key_count);
 
 	expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
-	if (expected_len != len) {
-		BT_ERR("load_keys: expected %u bytes, got %u bytes",
-							len, expected_len);
+	if (expected_len > len) {
+		BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
+							expected_len, len);
 		return -EINVAL;
 	}
 
@@ -931,7 +931,7 @@
 	BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
 								key_count);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	hci_link_keys_clear(hdev);
 
@@ -942,17 +942,36 @@
 	else
 		clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
 
-	for (i = 0; i < key_count; i++) {
-		struct mgmt_key_info *key = &cp->keys[i];
+	len -= sizeof(*cp);
+	i = 0;
+
+	while (i < len) {
+		struct mgmt_key_info *key = (void *) cp->keys + i;
+
+		i += sizeof(*key) + key->dlen;
+
+		if (key->type == HCI_LK_SMP_LTK) {
+			struct key_master_id *id = (void *) key->data;
+
+			if (key->dlen != sizeof(struct key_master_id))
+				continue;
+
+			hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
+						id->ediv, id->rand, key->val);
+
+			continue;
+		}
 
 		hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
 								key->pin_len);
 	}
 
-	hci_dev_unlock(hdev);
+	err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
+
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
-	return 0;
+	return err;
 }
 
 static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -971,7 +990,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	err = hci_remove_link_key(hdev, &cp->bdaddr);
 	if (err < 0) {
@@ -990,11 +1009,11 @@
 
 		put_unaligned_le16(conn->handle, &dc.handle);
 		dc.reason = 0x13; /* Remote User Terminated Connection */
-		err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
+		err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
 	}
 
 unlock:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1020,7 +1039,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -1055,7 +1074,7 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1076,7 +1095,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	count = 0;
 	list_for_each(p, &hdev->conn_hash.list) {
@@ -1092,8 +1111,6 @@
 
 	put_unaligned_le16(count, &rp->conn_count);
 
-	read_lock(&hci_dev_list_lock);
-
 	i = 0;
 	list_for_each(p, &hdev->conn_hash.list) {
 		struct hci_conn *c = list_entry(p, struct hci_conn, list);
@@ -1101,22 +1118,41 @@
 		bacpy(&rp->conn[i++], &c->dst);
 	}
 
-	read_unlock(&hci_dev_list_lock);
-
 	err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
 
 unlock:
 	kfree(rp);
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
 
+static int send_pin_code_neg_reply(struct sock *sk, u16 index,
+		struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp)
+{
+	struct pending_cmd *cmd;
+	int err;
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
+								sizeof(*cp));
+	if (!cmd)
+		return -ENOMEM;
+
+	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
+								&cp->bdaddr);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+	return err;
+}
+
 static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 									u16 len)
 {
 	struct hci_dev *hdev;
+	struct hci_conn *conn;
 	struct mgmt_cp_pin_code_reply *cp;
+	struct mgmt_cp_pin_code_neg_reply ncp;
 	struct hci_cp_pin_code_reply reply;
 	struct pending_cmd *cmd;
 	int err;
@@ -1132,13 +1168,32 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
 		goto failed;
 	}
 
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+	if (!conn) {
+		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
+		goto failed;
+	}
+
+	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
+		bacpy(&ncp.bdaddr, &cp->bdaddr);
+
+		BT_ERR("PIN code is not 16 bytes long");
+
+		err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
+		if (err >= 0)
+			err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+								EINVAL);
+
+		goto failed;
+	}
+
 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
@@ -1147,14 +1202,14 @@
 
 	bacpy(&reply.bdaddr, &cp->bdaddr);
 	reply.pin_len = cp->pin_len;
-	memcpy(reply.pin_code, cp->pin_code, 16);
+	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
 
 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1165,7 +1220,6 @@
 {
 	struct hci_dev *hdev;
 	struct mgmt_cp_pin_code_neg_reply *cp;
-	struct pending_cmd *cmd;
 	int err;
 
 	BT_DBG("");
@@ -1181,7 +1235,7 @@
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
 									ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1189,20 +1243,10 @@
 		goto failed;
 	}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
-								data, len);
-	if (!cmd) {
-		err = -ENOMEM;
-		goto failed;
-	}
-
-	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
-								&cp->bdaddr);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
+	err = send_pin_code_neg_reply(sk, index, hdev, cp);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1225,14 +1269,14 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	hdev->io_capability = cp->io_capability;
 
 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
 							hdev->io_capability);
 
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1318,7 +1362,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (cp->io_cap == 0x03) {
 		sec_level = BT_SECURITY_MEDIUM;
@@ -1328,7 +1372,7 @@
 		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
 	}
 
-	conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
+	conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level, auth_type);
 	if (IS_ERR(conn)) {
 		err = PTR_ERR(conn);
 		goto unlock;
@@ -1360,7 +1404,7 @@
 	err = 0;
 
 unlock:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1392,7 +1436,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, mgmt_op, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1410,7 +1454,7 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1434,7 +1478,7 @@
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
 	if (!cmd) {
@@ -1449,7 +1493,7 @@
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1468,7 +1512,7 @@
 		return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
 									ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
@@ -1498,7 +1542,7 @@
 		mgmt_pending_remove(cmd);
 
 unlock:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1522,7 +1566,7 @@
 		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
 									ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
 								cp->randomizer);
@@ -1532,7 +1576,7 @@
 		err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
 									0);
 
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1556,7 +1600,7 @@
 		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
 									ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
 	if (err < 0)
@@ -1566,7 +1610,7 @@
 		err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
 								NULL, 0);
 
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1641,6 +1685,70 @@
 	return err;
 }
 
+static int block_device(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
+{
+	struct hci_dev *hdev;
+	struct mgmt_cp_block_device *cp;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	cp = (void *) data;
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
+							EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
+							ENODEV);
+
+	err = hci_blacklist_add(hdev, &cp->bdaddr);
+
+	if (err < 0)
+		err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
+	else
+		err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
+							NULL, 0);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
+{
+	struct hci_dev *hdev;
+	struct mgmt_cp_unblock_device *cp;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	cp = (void *) data;
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+								EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+								ENODEV);
+
+	err = hci_blacklist_del(hdev, &cp->bdaddr);
+
+	if (err < 0)
+		err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
+	else
+		err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+								NULL, 0);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 {
 	unsigned char *buf;
@@ -1755,6 +1863,12 @@
 	case MGMT_OP_STOP_DISCOVERY:
 		err = stop_discovery(sk, index);
 		break;
+	case MGMT_OP_BLOCK_DEVICE:
+		err = block_device(sk, index, buf + sizeof(*hdr), len);
+		break;
+	case MGMT_OP_UNBLOCK_DEVICE:
+		err = unblock_device(sk, index, buf + sizeof(*hdr), len);
+		break;
 	default:
 		BT_DBG("Unknown op %u", opcode);
 		err = cmd_status(sk, index, opcode, 0x01);
@@ -1863,17 +1977,28 @@
 
 int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
 {
-	struct mgmt_ev_new_key ev;
+	struct mgmt_ev_new_key *ev;
+	int err, total;
 
-	memset(&ev, 0, sizeof(ev));
+	total = sizeof(struct mgmt_ev_new_key) + key->dlen;
+	ev = kzalloc(total, GFP_ATOMIC);
+	if (!ev)
+		return -ENOMEM;
 
-	ev.store_hint = persistent;
-	bacpy(&ev.key.bdaddr, &key->bdaddr);
-	ev.key.type = key->type;
-	memcpy(ev.key.val, key->val, 16);
-	ev.key.pin_len = key->pin_len;
+	bacpy(&ev->key.bdaddr, &key->bdaddr);
+	ev->key.type = key->type;
+	memcpy(ev->key.val, key->val, 16);
+	ev->key.pin_len = key->pin_len;
+	ev->key.dlen = key->dlen;
+	ev->store_hint = persistent;
 
-	return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+	memcpy(ev->key.data, key->data, key->dlen);
+
+	err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
+
+	kfree(ev);
+
+	return err;
 }
 
 int mgmt_connected(u16 index, bdaddr_t *bdaddr)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5759bb7..8c2e796 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -466,7 +466,6 @@
 
 	switch (d->state) {
 	case BT_CONNECT:
-	case BT_CONFIG:
 		if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
 			set_bit(RFCOMM_AUTH_REJECT, &d->flags);
 			rfcomm_schedule();
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1b10727..7b3638b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -679,7 +679,8 @@
 {
 	struct sock *sk = sock->sk;
 	struct bt_security sec;
-	int len, err = 0;
+	int err = 0;
+	size_t len;
 	u32 opt;
 
 	BT_DBG("sk %p", sk);
@@ -741,7 +742,6 @@
 static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
-	struct sock *l2cap_sk;
 	struct rfcomm_conninfo cinfo;
 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
 	int len, err = 0;
@@ -786,7 +786,6 @@
 			break;
 		}
 
-		l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
 
 		memset(&cinfo, 0, sizeof(cinfo));
 		cinfo.hci_handle = conn->hcon->handle;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cb4fb78..febc83a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -177,6 +177,7 @@
 {
 	bdaddr_t *src = &bt_sk(sk)->src;
 	bdaddr_t *dst = &bt_sk(sk)->dst;
+	__u16 pkt_type = sco_pi(sk)->pkt_type;
 	struct sco_conn *conn;
 	struct hci_conn *hcon;
 	struct hci_dev  *hdev;
@@ -192,10 +193,12 @@
 
 	if (lmp_esco_capable(hdev) && !disable_esco)
 		type = ESCO_LINK;
-	else
+	else {
 		type = SCO_LINK;
+		pkt_type &= SCO_ESCO_MASK;
+	}
 
-	hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+	hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
 	if (IS_ERR(hcon)) {
 		err = PTR_ERR(hcon);
 		goto done;
@@ -460,18 +463,22 @@
 	return 0;
 }
 
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
-	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+	struct sockaddr_sco sa;
 	struct sock *sk = sock->sk;
-	bdaddr_t *src = &sa->sco_bdaddr;
-	int err = 0;
+	bdaddr_t *src = &sa.sco_bdaddr;
+	int len, err = 0;
 
-	BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
+	BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
 
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), alen);
+	memcpy(&sa, addr, len);
+
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_OPEN) {
@@ -485,7 +492,8 @@
 		err = -EADDRINUSE;
 	} else {
 		/* Save source address */
-		bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+		bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+		sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 		sk->sk_state = BT_BOUND;
 	}
 
@@ -498,27 +506,34 @@
 
 static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
 {
-	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
 	struct sock *sk = sock->sk;
-	int err = 0;
-
+	struct sockaddr_sco sa;
+	int len, err = 0;
 
 	BT_DBG("sk %p", sk);
 
-	if (alen < sizeof(struct sockaddr_sco) ||
-	    addr->sa_family != AF_BLUETOOTH)
+	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
-	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
-		return -EBADFD;
-
-	if (sk->sk_type != SOCK_SEQPACKET)
-		return -EINVAL;
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), alen);
+	memcpy(&sa, addr, len);
 
 	lock_sock(sk);
 
+	if (sk->sk_type != SOCK_SEQPACKET) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+		err = -EBADFD;
+		goto done;
+	}
+
 	/* Set destination address and psm */
-	bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+	bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
+	sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 
 	err = sco_connect(sk);
 	if (err)
@@ -625,6 +640,7 @@
 		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
 	else
 		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+	sa->sco_pkt_type = sco_pi(sk)->pkt_type;
 
 	return 0;
 }
@@ -932,7 +948,7 @@
 		if (conn)
 			sco_conn_ready(conn);
 	} else
-		sco_conn_del(hcon, bt_err(status));
+		sco_conn_del(hcon, bt_to_errno(status));
 
 	return 0;
 }
@@ -944,7 +960,7 @@
 	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
 		return -EINVAL;
 
-	sco_conn_del(hcon, bt_err(reason));
+	sco_conn_del(hcon, bt_to_errno(reason));
 
 	return 0;
 }
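
With the additions above, user space can request specific (e)SCO packet types by filling the new sco_pkt_type member before bind() or connect(); the kernel masks it with SCO_ESCO_MASK when only plain SCO links are available. A rough sketch, assuming the extended struct sockaddr_sco from the matching header change (not shown here):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

static int sco_connect_pkt_type(const bdaddr_t *dst, uint16_t pkt_type)
{
	struct sockaddr_sco addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sco_family = AF_BLUETOOTH;
	bacpy(&addr.sco_bdaddr, dst);
	addr.sco_pkt_type = pkt_type;	/* e.g. an eSCO EV3/EV4/EV5 mask */

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}
	return sk;
}
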
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
new file mode 100644
index 0000000..391888b
--- /dev/null
+++ b/net/bluetooth/smp.c
@@ -0,0 +1,702 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/smp.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/b128ops.h>
+
+#define SMP_TIMEOUT 30000 /* 30 seconds */
+
+static inline void swap128(u8 src[16], u8 dst[16])
+{
+	int i;
+	for (i = 0; i < 16; i++)
+		dst[15 - i] = src[i];
+}
+
+static inline void swap56(u8 src[7], u8 dst[7])
+{
+	int i;
+	for (i = 0; i < 7; i++)
+		dst[6 - i] = src[i];
+}
+
+static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
+{
+	struct blkcipher_desc desc;
+	struct scatterlist sg;
+	int err, iv_len;
+	unsigned char iv[128];
+
+	if (tfm == NULL) {
+		BT_ERR("tfm %p", tfm);
+		return -EINVAL;
+	}
+
+	desc.tfm = tfm;
+	desc.flags = 0;
+
+	err = crypto_blkcipher_setkey(tfm, k, 16);
+	if (err) {
+		BT_ERR("cipher setkey failed: %d", err);
+		return err;
+	}
+
+	sg_init_one(&sg, r, 16);
+
+	iv_len = crypto_blkcipher_ivsize(tfm);
+	if (iv_len) {
+		memset(&iv, 0xff, iv_len);
+		crypto_blkcipher_set_iv(tfm, iv, iv_len);
+	}
+
+	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
+	if (err)
+		BT_ERR("Encrypt data error %d", err);
+
+	return err;
+}
+
+static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
+		u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
+		u8 _rat, bdaddr_t *ra, u8 res[16])
+{
+	u8 p1[16], p2[16];
+	int err;
+
+	memset(p1, 0, 16);
+
+	/* p1 = pres || preq || _rat || _iat */
+	swap56(pres, p1);
+	swap56(preq, p1 + 7);
+	p1[14] = _rat;
+	p1[15] = _iat;
+
+	memset(p2, 0, 16);
+
+	/* p2 = padding || ia || ra */
+	baswap((bdaddr_t *) (p2 + 4), ia);
+	baswap((bdaddr_t *) (p2 + 10), ra);
+
+	/* res = r XOR p1 */
+	u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
+
+	/* res = e(k, res) */
+	err = smp_e(tfm, k, res);
+	if (err) {
+		BT_ERR("Encrypt data error");
+		return err;
+	}
+
+	/* res = res XOR p2 */
+	u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
+
+	/* res = e(k, res) */
+	err = smp_e(tfm, k, res);
+	if (err)
+		BT_ERR("Encrypt data error");
+
+	return err;
+}
+
+static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16],
+			u8 r1[16], u8 r2[16], u8 _r[16])
+{
+	int err;
+
+	/* Only the 8 least significant octets of r1 and r2 are used */
+	memcpy(_r, r1 + 8, 8);
+	memcpy(_r + 8, r2 + 8, 8);
+
+	err = smp_e(tfm, k, _r);
+	if (err)
+		BT_ERR("Encrypt data error");
+
+	return err;
+}
+
+static int smp_rand(u8 *buf)
+{
+	get_random_bytes(buf, 16);
+
+	return 0;
+}
+
+static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
+						u16 dlen, void *data)
+{
+	struct sk_buff *skb;
+	struct l2cap_hdr *lh;
+	int len;
+
+	len = L2CAP_HDR_SIZE + sizeof(code) + dlen;
+
+	if (len > conn->mtu)
+		return NULL;
+
+	skb = bt_skb_alloc(len, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+	lh->len = cpu_to_le16(sizeof(code) + dlen);
+	lh->cid = cpu_to_le16(L2CAP_CID_SMP);
+
+	memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
+
+	memcpy(skb_put(skb, dlen), data, dlen);
+
+	return skb;
+}
+
+static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
+{
+	struct sk_buff *skb = smp_build_cmd(conn, code, len, data);
+
+	BT_DBG("code 0x%2.2x", code);
+
+	if (!skb)
+		return;
+
+	hci_send_acl(conn->hcon, skb, 0);
+}
+
+static __u8 seclevel_to_authreq(__u8 level)
+{
+	switch (level) {
+	case BT_SECURITY_HIGH:
+		/* Right now we don't support bonding */
+		return SMP_AUTH_MITM;
+
+	default:
+		return SMP_AUTH_NONE;
+	}
+}
+
+static void build_pairing_cmd(struct l2cap_conn *conn,
+				struct smp_cmd_pairing *req,
+				struct smp_cmd_pairing *rsp,
+				__u8 authreq)
+{
+	u8 dist_keys;
+
+	dist_keys = 0;
+	if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
+		dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
+		authreq |= SMP_AUTH_BONDING;
+	}
+
+	if (rsp == NULL) {
+		req->io_capability = conn->hcon->io_capability;
+		req->oob_flag = SMP_OOB_NOT_PRESENT;
+		req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+		req->init_key_dist = dist_keys;
+		req->resp_key_dist = dist_keys;
+		req->auth_req = authreq;
+		return;
+	}
+
+	rsp->io_capability = conn->hcon->io_capability;
+	rsp->oob_flag = SMP_OOB_NOT_PRESENT;
+	rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+	rsp->init_key_dist = req->init_key_dist & dist_keys;
+	rsp->resp_key_dist = req->resp_key_dist & dist_keys;
+	rsp->auth_req = authreq;
+}
+
+static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
+{
+	if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
+			(max_key_size < SMP_MIN_ENC_KEY_SIZE))
+		return SMP_ENC_KEY_SIZE;
+
+	conn->smp_key_size = max_key_size;
+
+	return 0;
+}
+
+static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+	u8 key_size;
+
+	BT_DBG("conn %p", conn);
+
+	conn->preq[0] = SMP_CMD_PAIRING_REQ;
+	memcpy(&conn->preq[1], req, sizeof(*req));
+	skb_pull(skb, sizeof(*req));
+
+	if (req->oob_flag)
+		return SMP_OOB_NOT_AVAIL;
+
+	/* We didn't start the pairing, so no requirements */
+	build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE);
+
+	key_size = min(req->max_key_size, rsp.max_key_size);
+	if (check_enc_key_size(conn, key_size))
+		return SMP_ENC_KEY_SIZE;
+
+	/* Just works */
+	memset(conn->tk, 0, sizeof(conn->tk));
+
+	conn->prsp[0] = SMP_CMD_PAIRING_RSP;
+	memcpy(&conn->prsp[1], &rsp, sizeof(rsp));
+
+	smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
+
+	mod_timer(&conn->security_timer, jiffies +
+					msecs_to_jiffies(SMP_TIMEOUT));
+
+	return 0;
+}
+
+static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
+	struct smp_cmd_pairing_confirm cp;
+	struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
+	int ret;
+	u8 res[16], key_size;
+
+	BT_DBG("conn %p", conn);
+
+	skb_pull(skb, sizeof(*rsp));
+
+	req = (void *) &conn->preq[1];
+
+	key_size = min(req->max_key_size, rsp->max_key_size);
+	if (check_enc_key_size(conn, key_size))
+		return SMP_ENC_KEY_SIZE;
+
+	if (rsp->oob_flag)
+		return SMP_OOB_NOT_AVAIL;
+
+	/* Just works */
+	memset(conn->tk, 0, sizeof(conn->tk));
+
+	conn->prsp[0] = SMP_CMD_PAIRING_RSP;
+	memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
+
+	ret = smp_rand(conn->prnd);
+	if (ret)
+		return SMP_UNSPECIFIED;
+
+	ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0,
+			conn->src, conn->hcon->dst_type, conn->dst, res);
+	if (ret)
+		return SMP_UNSPECIFIED;
+
+	swap128(res, cp.confirm_val);
+
+	smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+
+	return 0;
+}
+
+static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
+
+	BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+	memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf));
+	skb_pull(skb, sizeof(conn->pcnf));
+
+	if (conn->hcon->out) {
+		u8 random[16];
+
+		swap128(conn->prnd, random);
+		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
+								random);
+	} else {
+		struct smp_cmd_pairing_confirm cp;
+		int ret;
+		u8 res[16];
+
+		ret = smp_rand(conn->prnd);
+		if (ret)
+			return SMP_UNSPECIFIED;
+
+		ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
+						conn->hcon->dst_type, conn->dst,
+						0, conn->src, res);
+		if (ret)
+			return SMP_CONFIRM_FAILED;
+
+		swap128(res, cp.confirm_val);
+
+		smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+	}
+
+	mod_timer(&conn->security_timer, jiffies +
+					msecs_to_jiffies(SMP_TIMEOUT));
+
+	return 0;
+}
+
+static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	struct hci_conn *hcon = conn->hcon;
+	struct crypto_blkcipher *tfm = hcon->hdev->tfm;
+	int ret;
+	u8 key[16], res[16], random[16], confirm[16];
+
+	swap128(skb->data, random);
+	skb_pull(skb, sizeof(random));
+
+	if (conn->hcon->out)
+		ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
+				conn->src, conn->hcon->dst_type, conn->dst,
+				res);
+	else
+		ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
+				conn->hcon->dst_type, conn->dst, 0, conn->src,
+				res);
+	if (ret)
+		return SMP_UNSPECIFIED;
+
+	BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+	swap128(res, confirm);
+
+	if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
+		BT_ERR("Pairing failed (confirmation values mismatch)");
+		return SMP_CONFIRM_FAILED;
+	}
+
+	if (conn->hcon->out) {
+		u8 stk[16], rand[8];
+		__le16 ediv;
+
+		memset(rand, 0, sizeof(rand));
+		ediv = 0;
+
+		smp_s1(tfm, conn->tk, random, conn->prnd, key);
+		swap128(key, stk);
+
+		memset(stk + conn->smp_key_size, 0,
+				SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
+
+		hci_le_start_enc(hcon, ediv, rand, stk);
+		hcon->enc_key_size = conn->smp_key_size;
+	} else {
+		u8 stk[16], r[16], rand[8];
+		__le16 ediv;
+
+		memset(rand, 0, sizeof(rand));
+		ediv = 0;
+
+		swap128(conn->prnd, r);
+		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
+
+		smp_s1(tfm, conn->tk, conn->prnd, random, key);
+		swap128(key, stk);
+
+		memset(stk + conn->smp_key_size, 0,
+				SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
+
+		hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size,
+							ediv, rand, stk);
+	}
+
+	return 0;
+}
+
+static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	struct smp_cmd_security_req *rp = (void *) skb->data;
+	struct smp_cmd_pairing cp;
+	struct hci_conn *hcon = conn->hcon;
+
+	BT_DBG("conn %p", conn);
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+		return 0;
+
+	skb_pull(skb, sizeof(*rp));
+
+	memset(&cp, 0, sizeof(cp));
+	build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
+
+	conn->preq[0] = SMP_CMD_PAIRING_REQ;
+	memcpy(&conn->preq[1], &cp, sizeof(cp));
+
+	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+
+	mod_timer(&conn->security_timer, jiffies +
+					msecs_to_jiffies(SMP_TIMEOUT));
+
+	set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+
+	return 0;
+}
+
+int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
+{
+	struct hci_conn *hcon = conn->hcon;
+	__u8 authreq;
+
+	BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
+
+	if (!lmp_host_le_capable(hcon->hdev))
+		return 1;
+
+	if (IS_ERR(hcon->hdev->tfm))
+		return 1;
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+		return 0;
+
+	if (sec_level == BT_SECURITY_LOW)
+		return 1;
+
+	if (hcon->sec_level >= sec_level)
+		return 1;
+
+	authreq = seclevel_to_authreq(sec_level);
+
+	if (hcon->link_mode & HCI_LM_MASTER) {
+		struct smp_cmd_pairing cp;
+		struct link_key *key;
+
+		key = hci_find_link_key_type(hcon->hdev, conn->dst,
+							HCI_LK_SMP_LTK);
+		if (key) {
+			struct key_master_id *master = (void *) key->data;
+
+			hci_le_start_enc(hcon, master->ediv, master->rand,
+								key->val);
+			hcon->enc_key_size = key->pin_len;
+
+			goto done;
+		}
+
+		build_pairing_cmd(conn, &cp, NULL, authreq);
+		conn->preq[0] = SMP_CMD_PAIRING_REQ;
+		memcpy(&conn->preq[1], &cp, sizeof(cp));
+
+		mod_timer(&conn->security_timer, jiffies +
+					msecs_to_jiffies(SMP_TIMEOUT));
+
+		smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+	} else {
+		struct smp_cmd_security_req cp;
+		cp.auth_req = authreq;
+		smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+	}
+
+done:
+	hcon->pending_sec_level = sec_level;
+	set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
+
+	return 0;
+}
+
+static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	struct smp_cmd_encrypt_info *rp = (void *) skb->data;
+
+	skb_pull(skb, sizeof(*rp));
+
+	memcpy(conn->tk, rp->ltk, sizeof(conn->tk));
+
+	return 0;
+}
+
+static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	struct smp_cmd_master_ident *rp = (void *) skb->data;
+
+	skb_pull(skb, sizeof(*rp));
+
+	hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size,
+						rp->ediv, rp->rand, conn->tk);
+
+	smp_distribute_keys(conn, 1);
+
+	return 0;
+}
+
+int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+	__u8 code = skb->data[0];
+	__u8 reason;
+	int err = 0;
+
+	if (!lmp_host_le_capable(conn->hcon->hdev)) {
+		err = -ENOTSUPP;
+		reason = SMP_PAIRING_NOTSUPP;
+		goto done;
+	}
+
+	if (IS_ERR(conn->hcon->hdev->tfm)) {
+		err = PTR_ERR(conn->hcon->hdev->tfm);
+		reason = SMP_PAIRING_NOTSUPP;
+		goto done;
+	}
+
+	skb_pull(skb, sizeof(code));
+
+	switch (code) {
+	case SMP_CMD_PAIRING_REQ:
+		reason = smp_cmd_pairing_req(conn, skb);
+		break;
+
+	case SMP_CMD_PAIRING_FAIL:
+		reason = 0;
+		err = -EPERM;
+		break;
+
+	case SMP_CMD_PAIRING_RSP:
+		reason = smp_cmd_pairing_rsp(conn, skb);
+		break;
+
+	case SMP_CMD_SECURITY_REQ:
+		reason = smp_cmd_security_req(conn, skb);
+		break;
+
+	case SMP_CMD_PAIRING_CONFIRM:
+		reason = smp_cmd_pairing_confirm(conn, skb);
+		break;
+
+	case SMP_CMD_PAIRING_RANDOM:
+		reason = smp_cmd_pairing_random(conn, skb);
+		break;
+
+	case SMP_CMD_ENCRYPT_INFO:
+		reason = smp_cmd_encrypt_info(conn, skb);
+		break;
+
+	case SMP_CMD_MASTER_IDENT:
+		reason = smp_cmd_master_ident(conn, skb);
+		break;
+
+	case SMP_CMD_IDENT_INFO:
+	case SMP_CMD_IDENT_ADDR_INFO:
+	case SMP_CMD_SIGN_INFO:
+		/* Just ignored */
+		reason = 0;
+		break;
+
+	default:
+		BT_DBG("Unknown command code 0x%2.2x", code);
+
+		reason = SMP_CMD_NOTSUPP;
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+done:
+	if (reason)
+		smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+								&reason);
+
+	kfree_skb(skb);
+	return err;
+}
+
+int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
+{
+	struct smp_cmd_pairing *req, *rsp;
+	__u8 *keydist;
+
+	BT_DBG("conn %p force %d", conn, force);
+
+	if (IS_ERR(conn->hcon->hdev->tfm))
+		return PTR_ERR(conn->hcon->hdev->tfm);
+
+	rsp = (void *) &conn->prsp[1];
+
+	/* The responder sends its keys first */
+	if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
+		return 0;
+
+	req = (void *) &conn->preq[1];
+
+	if (conn->hcon->out) {
+		keydist = &rsp->init_key_dist;
+		*keydist &= req->init_key_dist;
+	} else {
+		keydist = &rsp->resp_key_dist;
+		*keydist &= req->resp_key_dist;
+	}
+
+
+	BT_DBG("keydist 0x%x", *keydist);
+
+	if (*keydist & SMP_DIST_ENC_KEY) {
+		struct smp_cmd_encrypt_info enc;
+		struct smp_cmd_master_ident ident;
+		__le16 ediv;
+
+		get_random_bytes(enc.ltk, sizeof(enc.ltk));
+		get_random_bytes(&ediv, sizeof(ediv));
+		get_random_bytes(ident.rand, sizeof(ident.rand));
+
+		smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
+
+		hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size,
+						ediv, ident.rand, enc.ltk);
+
+		ident.ediv = cpu_to_le16(ediv);
+
+		smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
+
+		*keydist &= ~SMP_DIST_ENC_KEY;
+	}
+
+	if (*keydist & SMP_DIST_ID_KEY) {
+		struct smp_cmd_ident_addr_info addrinfo;
+		struct smp_cmd_ident_info idinfo;
+
+		/* Send a dummy key */
+		get_random_bytes(idinfo.irk, sizeof(idinfo.irk));
+
+		smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
+
+		/* Just public address */
+		memset(&addrinfo, 0, sizeof(addrinfo));
+		bacpy(&addrinfo.bdaddr, conn->src);
+
+		smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
+								&addrinfo);
+
+		*keydist &= ~SMP_DIST_ID_KEY;
+	}
+
+	if (*keydist & SMP_DIST_SIGN) {
+		struct smp_cmd_sign_info sign;
+
+		/* Send a dummy key */
+		get_random_bytes(sign.csrk, sizeof(sign.csrk));
+
+		smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
+
+		*keydist &= ~SMP_DIST_SIGN;
+	}
+
+	return 0;
+}
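
The pairing-confirm exchange above rests entirely on the SMP c1 function: confirm = e(k, e(k, r XOR p1) XOR p2), where p1 is built from the exchanged pairing PDUs and address types and p2 from the two device addresses (smp_c1() additionally handles byte order with swap56()/swap128()). A compact sketch of the same computation, with aes_128() standing in as a hypothetical AES-128 block primitive:

#include <stdint.h>

void aes_128(const uint8_t key[16], uint8_t block[16]);	/* hypothetical */

static void xor128(uint8_t dst[16], const uint8_t a[16], const uint8_t b[16])
{
	int i;

	for (i = 0; i < 16; i++)
		dst[i] = a[i] ^ b[i];
}

/* confirm = e(k, e(k, r XOR p1) XOR p2) */
static void c1(const uint8_t k[16], const uint8_t r[16],
		const uint8_t p1[16], const uint8_t p2[16],
		uint8_t confirm[16])
{
	xor128(confirm, r, p1);
	aes_128(k, confirm);		/* e(k, r XOR p1) */
	xor128(confirm, confirm, p2);
	aes_128(k, confirm);		/* e(k, ... XOR p2) */
}
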
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f2dc69c..681084d 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -14,6 +14,7 @@
 	     inet_fragment.o ping.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ef1528a..4d60f12 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -118,6 +118,19 @@
 #include <linux/mroute.h>
 #endif
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -258,6 +271,7 @@
 	return ipprot->netns_ok;
 }
 
+
 /*
  *	Create an inet socket.
  */
@@ -274,6 +288,9 @@
 	int try_loading_module = 0;
 	int err;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	if (unlikely(!inet_ehash_secret))
 		if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 			build_ehash_secret();
@@ -874,6 +891,7 @@
 	case SIOCSIFPFLAGS:
 	case SIOCGIFPFLAGS:
 	case SIOCSIFFLAGS:
+	case SIOCKILLADDR:
 		err = devinet_ioctl(net, cmd, (void __user *)arg);
 		break;
 	default:
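
The effect of the hook above (and its IPv6 twin further down) is that inet_create() refuses socket creation for callers outside the AID_INET group that also lack CAP_NET_RAW. From user space the failure is an ordinary EACCES, as in this sketch:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 && errno == EACCES)
		printf("no AID_INET membership and no CAP_NET_RAW\n");
	return 0;
}
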
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 0d4a184..a829824 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -59,6 +59,7 @@
 
 #include <net/arp.h>
 #include <net/ip.h>
+#include <net/tcp.h>
 #include <net/route.h>
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
@@ -735,6 +736,7 @@
 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
 	case SIOCSIFDSTADDR:	/* Set the destination address */
 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
+	case SIOCKILLADDR:	/* Nuke all sockets on this address */
 		ret = -EACCES;
 		if (!capable(CAP_NET_ADMIN))
 			goto out;
@@ -786,7 +788,8 @@
 	}
 
 	ret = -EADDRNOTAVAIL;
-	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
+	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
+	    && cmd != SIOCKILLADDR)
 		goto done;
 
 	switch (cmd) {
@@ -912,6 +915,9 @@
 			inet_insert_ifa(ifa);
 		}
 		break;
+	case SIOCKILLADDR:	/* Nuke all connections on this address */
+		ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
+		break;
 	}
 done:
 	rtnl_unlock();
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a..73b4e91 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -113,6 +113,18 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_TARGET_REJECT_SKERR
+	bool "Force socket error when rejecting with icmp*"
+	depends on IP_NF_TARGET_REJECT
+	default n
+	help
+	  The REJECT target normally only sends an ICMP message and leaves
+	  the local socket unaware that its packet was rejected.  With this
+	  option, a "--reject-with icmp*" rule also reports a matching error
+	  on the rejected socket instead of letting it wait for a timeout.
+
+	  If unsure, say N.
+
 config IP_NF_TARGET_LOG
 	tristate "LOG target support"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 51f13f8..9dd754c 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -128,6 +128,14 @@
 static inline void send_unreach(struct sk_buff *skb_in, int code)
 {
 	icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR
+	if (skb_in->sk) {
+		skb_in->sk->sk_err = icmp_err_convert[code].errno;
+		skb_in->sk->sk_error_report(skb_in->sk);
+		pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n",
+			skb_in->sk->sk_err, skb_in, skb_in->sk);
+	}
+#endif
 }
 
 static unsigned int
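
When IP_NF_TARGET_REJECT_SKERR is enabled and the rejected skb belongs to a local socket, the hunk above copies the ICMP code's errno (via icmp_err_convert) into sk_err and wakes the socket. A hedged illustration of what a local sender would then observe, assuming a "-j REJECT --reject-with icmp-port-unreachable" rule matches its traffic:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static void probe(const char *ip, unsigned short port)
{
	struct sockaddr_in dst;
	char buf[32] = "ping";
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(port);
	inet_pton(AF_INET, ip, &dst.sin_addr);

	connect(fd, (struct sockaddr *) &dst, sizeof(dst));
	send(fd, buf, sizeof(buf), 0);

	/* ICMP_PORT_UNREACH maps to ECONNREFUSED */
	if (recv(fd, buf, sizeof(buf), 0) < 0 && errno == ECONNREFUSED)
		printf("reject surfaced as a socket error\n");
	close(fd);
}
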
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 0000000..0cbbf10
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+			    struct kobj_attribute *attr, char *buf) \
+{ \
+	return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+			     struct kobj_attribute *attr, \
+			     const char *buf, size_t count) \
+{ \
+	int val, ret; \
+	ret = sscanf(buf, "%d", &val); \
+	if (ret != 1) \
+		return -EINVAL; \
+	if (val < 0) \
+		return -EINVAL; \
+	_var = val; \
+	return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+	__ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+	&tcp_wmem_min_attr.attr,
+	&tcp_wmem_def_attr.attr,
+	&tcp_wmem_max_attr.attr,
+	&tcp_rmem_min_attr.attr,
+	&tcp_rmem_def_attr.attr,
+	&tcp_rmem_max_attr.attr,
+	NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+	.attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+	struct kobject *ipv4_kobject;
+	int ret;
+
+	ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+	if (!ipv4_kobject)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+	if (ret) {
+		kobject_put(ipv4_kobject);
+		return ret;
+	}
+
+	return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
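
Since the kobject is parented to kernel_kobj, the attributes appear as /sys/kernel/ipv4/tcp_{r,w}mem_{min,def,max}, each accepting a single non-negative integer. A small sketch of adjusting one of them:

#include <stdio.h>

static int set_tcp_wmem_max(long bytes)
{
	FILE *f = fopen("/sys/kernel/ipv4/tcp_wmem_max", "w");

	if (!f)
		return -1;	/* needs write permission (mode 0644) */
	fprintf(f, "%ld\n", bytes);
	return fclose(f);
}
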
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febca..5eb7af2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -266,11 +266,13 @@
 #include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
+#include <linux/uid_stat.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <net/ip6_route.h>
 #include <net/netdma.h>
 #include <net/sock.h>
 
@@ -1112,6 +1114,9 @@
 	if (copied)
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	release_sock(sk);
+
+	if (copied > 0)
+		uid_stat_tcp_snd(current_uid(), copied);
 	return copied;
 
 do_fault:
@@ -1388,8 +1393,11 @@
 	tcp_rcv_space_adjust(sk);
 
 	/* Clean up data we have read: This will do ACK frames. */
-	if (copied > 0)
+	if (copied > 0) {
 		tcp_cleanup_rbuf(sk, copied);
+		uid_stat_tcp_rcv(current_uid(), copied);
+	}
+
 	return copied;
 }
 EXPORT_SYMBOL(tcp_read_sock);
@@ -1771,6 +1779,9 @@
 	tcp_cleanup_rbuf(sk, copied);
 
 	release_sock(sk);
+
+	if (copied > 0)
+		uid_stat_tcp_rcv(current_uid(), copied);
 	return copied;
 
 out:
@@ -1779,6 +1790,8 @@
 
 recv_urg:
 	err = tcp_recv_urg(sk, msg, len, flags);
+	if (err > 0)
+		uid_stat_tcp_rcv(current_uid(), err);
 	goto out;
 }
 EXPORT_SYMBOL(tcp_recvmsg);
@@ -3310,3 +3323,100 @@
 	tcp_secret_retiring = &tcp_secret_two;
 	tcp_secret_secondary = &tcp_secret_two;
 }
+
+static int tcp_is_local(struct net *net, __be32 addr)
+{
+	struct rtable *rt;
+	struct flowi4 fl4 = { .daddr = addr };
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR_OR_NULL(rt))
+		return 0;
+	return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int tcp_is_local6(struct net *net, struct in6_addr *addr)
+{
+	struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
+	return rt6 && rt6->rt6i_dev && (rt6->rt6i_dev->flags & IFF_LOOPBACK);
+}
+#endif
+
+/*
+ * tcp_nuke_addr - destroy all sockets on the given local address
+ * If the local address is the unspecified address (0.0.0.0 or ::), destroy all
+ * sockets with local addresses that are not configured.
+ */
+int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
+{
+	int family = addr->sa_family;
+	unsigned int bucket;
+
+	struct in_addr *in;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct in6_addr *in6;
+#endif
+	if (family == AF_INET) {
+		in = &((struct sockaddr_in *)addr)->sin_addr;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	} else if (family == AF_INET6) {
+		in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
+#endif
+	} else {
+		return -EAFNOSUPPORT;
+	}
+
+	for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+		struct hlist_nulls_node *node;
+		struct sock *sk;
+		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+		spin_lock_bh(lock);
+		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+			struct inet_sock *inet = inet_sk(sk);
+
+			if (family == AF_INET) {
+				__be32 s4 = inet->inet_rcv_saddr;
+				if (in->s_addr != s4 &&
+				    !(in->s_addr == INADDR_ANY &&
+				      !tcp_is_local(net, s4)))
+					continue;
+			}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+			if (family == AF_INET6) {
+				struct in6_addr *s6;
+				if (!inet->pinet6)
+					continue;
+				s6 = &inet->pinet6->rcv_saddr;
+				if (!ipv6_addr_equal(in6, s6) &&
+				    !(ipv6_addr_equal(in6, &in6addr_any) &&
+				      !tcp_is_local6(net, s6)))
+					continue;
+			}
+#endif
+
+			if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+				continue;
+			if (sock_flag(sk, SOCK_DEAD))
+				continue;
+
+			sock_hold(sk);
+			spin_unlock_bh(lock);
+
+			local_bh_disable();
+			bh_lock_sock(sk);
+			sk->sk_err = ETIMEDOUT;
+			sk->sk_error_report(sk);
+
+			tcp_done(sk);
+			bh_unlock_sock(sk);
+			local_bh_enable();
+			sock_put(sk);
+
+			goto restart;
+		}
+		spin_unlock_bh(lock);
+	}
+
+	return 0;
+}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3b5669a..ab86578 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -63,6 +63,20 @@
 #include <asm/system.h>
 #include <linux/mroute6.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
+
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
 MODULE_LICENSE("GPL");
@@ -109,6 +123,9 @@
 	int try_loading_module = 0;
 	int err;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	if (sock->type != SOCK_RAW &&
 	    sock->type != SOCK_DGRAM &&
 	    !inet_ehash_secret)
@@ -477,6 +494,21 @@
 
 EXPORT_SYMBOL(inet6_getname);
 
+int inet6_killaddr_ioctl(struct net *net, void __user *arg)
+{
+	struct in6_ifreq ireq;
+	struct sockaddr_in6 sin6;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EACCES;
+
+	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
+		return -EFAULT;
+
+	sin6.sin6_family = AF_INET6;
+	ipv6_addr_copy(&sin6.sin6_addr, &ireq.ifr6_addr);
+	return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
+}
+
 int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
 	struct sock *sk = sock->sk;
@@ -501,6 +533,8 @@
 		return addrconf_del_ifaddr(net, (void __user *) arg);
 	case SIOCSIFDSTADDR:
 		return addrconf_set_dstaddr(net, (void __user *) arg);
+	case SIOCKILLADDR:
+		return inet6_killaddr_ioctl(net, (void __user *) arg);
 	default:
 		if (!sk->sk_prot->ioctl)
 			return -ENOIOCTLCMD;
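
Userspace reaches tcp_nuke_addr() through the SIOCKILLADDR ioctl wired up for IPv6 above. A hedged sketch for that path; it assumes SIOCKILLADDR is exported to userspace by the rest of this patch (its value is not shown here), and it mirrors the kernel's struct in6_ifreq locally so the example stays self-contained:

	/* Illustrative sketch: reset all TCP sockets bound to an IPv6 address.
	 * Requires CAP_NET_ADMIN (see inet6_killaddr_ioctl() above). Assumes
	 * SIOCKILLADDR is made visible via <linux/sockios.h> elsewhere in
	 * this patch. */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <linux/sockios.h>	/* SIOCKILLADDR */
	#include <unistd.h>

	/* Local mirror of the kernel's struct in6_ifreq (include/linux/ipv6.h). */
	struct in6_ifreq_compat {
		struct in6_addr	ifr6_addr;
		uint32_t	ifr6_prefixlen;
		int		ifr6_ifindex;
	};

	int kill_tcp_on_addr6(const char *addr_str)
	{
		struct in6_ifreq_compat ireq;
		int fd, ret;

		memset(&ireq, 0, sizeof(ireq));
		if (inet_pton(AF_INET6, addr_str, &ireq.ifr6_addr) != 1)
			return -1;

		fd = socket(AF_INET6, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		ret = ioctl(fd, SIOCKILLADDR, &ireq);
		close(fd);
		return ret;
	}

Per the tcp_nuke_addr() comment above, passing the unspecified address (::) resets every socket whose local address is no longer configured on any interface.
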
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 4484648..5bbf531 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -174,6 +174,18 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP6_NF_TARGET_REJECT_SKERR
+	bool "Force socket error when rejecting with icmp*"
+	depends on IP6_NF_TARGET_REJECT
+	default n
+	help
+	  This option additionally turns a "--reject-with icmp*" into a
+	  matching error on the local socket. The REJECT target normally
+	  only sends an ICMP message and leaves the local socket unaware
+	  of any ingress rejects.
+
+	  If unsure, say N.
+
 config IP6_NF_MANGLE
 	tristate "Packet mangling"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index a5a4c5d..09d3049 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -177,6 +177,15 @@
 		skb_in->dev = net->loopback_dev;
 
 	icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR
+	if (skb_in->sk) {
+		icmpv6_err_convert(ICMPV6_DEST_UNREACH, code,
+				   &skb_in->sk->sk_err);
+		skb_in->sk->sk_error_report(skb_in->sk);
+		pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n",
+			skb_in->sk->sk_err, skb_in, skb_in->sk);
+	}
+#endif
 }
 
 static unsigned int
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32bff6d..ddb7bb5 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -902,6 +902,8 @@
 	based on who created the socket: the user or group. It is also
 	possible to check whether a socket actually exists.
 
+	Conflicts with the '"quota, tag, owner" match'.
+
 config NETFILTER_XT_MATCH_POLICY
 	tristate 'IPsec "policy" match support'
 	depends on XFRM
@@ -935,6 +937,22 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_QTAGUID
+	bool '"quota, tag, owner" match and stats support'
+	depends on NETFILTER_XT_MATCH_SOCKET
+	depends on NETFILTER_XT_MATCH_OWNER=n
+	help
+	  This option replaces the `owner' match. In addition to matching
+	  on uid, it keeps stats based on a tag assigned to a socket.
+	  The full tag is composed of a UID and an accounting tag.
+	  The tags are assignable to sockets from user space (e.g. a download
+	  manager can assign the socket to another UID for accounting).
+	  Stats and control are done via /proc/net/xt_qtaguid/.
+	  It takes the same arguments as the owner match and registers itself
+	  under the same name, so the existing iptables tool recognizes it.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_QUOTA
 	tristate '"quota" match support'
 	depends on NETFILTER_ADVANCED
@@ -945,6 +963,18 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+	tristate '"quota2" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds a `quota2' match, which matches on a byte counter
+	  that is kept correctly across CPUs rather than per CPU.
+	  It also allows naming the quotas.
+	  This is based on http://xtables-addons.git.sourceforge.net
+
+	  If you want to compile it as a module, say M here and read
+	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
 	tristate '"rateest" match support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1a02853..8b658ef 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -95,7 +95,9 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 0000000..49ed432
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,1271 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* TODO: support ipv6 for iface_stat */
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#include <linux/netfilter/xt_socket.h>
+/* We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values. */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+	((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and cannot be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a:  {acct_tag=1, uid_tag=10003}
+ * b:  {acct_tag=2, uid_tag=10003}
+ * c:  {acct_tag=3, uid_tag=10003}
+ * d:  {acct_tag=0, uid_tag=10003}
+ * (a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.)
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t;  /* Only used via accessors */
+
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+
+enum ifs_tx_rx {
+	IFS_TX,
+	IFS_RX,
+	IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+	IFS_TCP,
+	IFS_UDP,
+	IFS_PROTO_OTHER,
+	IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+	uint64_t bytes;
+	uint64_t packets;
+};
+
+struct data_counters {
+	struct byte_packet_counters bpc[IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
+
+struct tag_stat {
+	struct rb_node node;
+	tag_t tag;
+
+	struct data_counters counters;
+	/* If this tag is acct_tag based, we need to count against the
+	 * matching parent uid_tag. */
+	struct data_counters *parent_counters;
+	struct proc_dir_entry *proc_ptr;
+};
+
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+struct iface_stat {
+	struct list_head list;
+	char *ifname;
+	uint64_t rx_bytes;
+	uint64_t rx_packets;
+	uint64_t tx_bytes;
+	uint64_t tx_packets;
+	bool active;
+	struct proc_dir_entry *proc_ptr;
+
+	struct rb_root tag_stat_tree;
+	spinlock_t tag_stat_list_lock;
+};
+
+
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+/*
+ * Track the tag that this socket is transferring data for, and not necessarily
+ * the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ */
+struct sock_tag {
+	struct rb_node node;
+	struct sock *sk;
+	tag_t tag;
+};
+
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par);
+
+/*----------------------------------------------*/
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+	return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+	return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+	return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+	return tag & 0xFFFFFFFFULL;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+	return tag & 0xFFFFFFFFULL;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+	return tag & ~0xFFFFFFFFULL;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+	return !(tag & 0xFFFFFFFFULL);
+}
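
Taken together, the accessors above treat the 64-bit tag as the accounting tag in the upper 32 bits OR'd with the uid in the lower 32 bits; valid_atag() insists that an accounting tag has no uid bits set. A small standalone sketch of that layout (illustrative only, mirroring the helpers above):

	/* Standalone illustration of the tag layout used by the accessors above. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t uid = 10003;			/* example uid */
		uint64_t acct_tag = (uint64_t)2 << 32;	/* accounting tag "2" */
		uint64_t tag = acct_tag | uid;		/* combine_atag_with_uid() */

		printf("tag       = 0x%llx\n", (unsigned long long)tag);
		printf("uid part  = %u\n", (uint32_t)(tag & 0xFFFFFFFFULL));
		printf("acct part = 0x%llx\n",
		       (unsigned long long)(tag & ~0xFFFFFFFFULL));
		return 0;
	}

For uid 10003 and accounting tag 2 this prints tag = 0x200002713, uid part = 10003 and acct part = 0x200000000.
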
+
+static inline void dc_add_byte_packets(struct data_counters *counters,
+				  enum ifs_tx_rx direction,
+				  enum ifs_proto ifs_proto,
+				  int bytes,
+				  int packets)
+{
+	counters->bpc[direction][ifs_proto].bytes += bytes;
+	counters->bpc[direction][ifs_proto].packets += packets;
+}
+
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+				    enum ifs_tx_rx direction)
+{
+	return counters->bpc[direction][IFS_TCP].bytes
+		+ counters->bpc[direction][IFS_UDP].bytes
+		+ counters->bpc[direction][IFS_PROTO_OTHER].bytes;
+}
+
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct tag_stat *data = rb_entry(node, struct tag_stat, node);
+		int result = tag_compare(tag, data->tag);
+		pr_debug("qtaguid: tag_stat_tree_search(): tag=0x%llx"
+			 " (uid=%d)\n",
+			 data->tag,
+			 get_uid_from_tag(data->tag));
+
+		if (result < 0)
+			node = node->rb_left;
+		else if (result > 0)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct tag_stat *this = rb_entry(*new, struct tag_stat,
+						 node);
+		int result = tag_compare(data->tag, this->tag);
+		pr_debug("qtaguid: tag_stat_tree_insert(): tag=0x%llx"
+			 " (uid=%d)\n",
+			 this->tag,
+			 get_uid_from_tag(this->tag));
+		parent = *new;
+		if (result < 0)
+			new = &((*new)->rb_left);
+		else if (result > 0)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+					     const struct sock *sk)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct sock_tag *data = rb_entry(node, struct sock_tag, node);
+		ptrdiff_t result = sk - data->sk;
+		if (result < 0)
+			node = node->rb_left;
+		else if (result > 0)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct sock_tag *this = rb_entry(*new, struct sock_tag, node);
+		ptrdiff_t result = data->sk - this->sk;
+		parent = *new;
+		if (result < 0)
+			new = &((*new)->rb_left);
+		else if (result > 0)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static int read_proc_u64(char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	int len;
+	uint64_t value;
+	char *p = page;
+	uint64_t *iface_entry = data;
+	if (!data)
+		return 0;
+
+	value = *iface_entry;
+	p += sprintf(p, "%llu\n", value);
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+static int read_proc_bool(char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	int len;
+	bool value;
+	char *p = page;
+	bool *bool_entry = data;
+	if (!data)
+		return 0;
+
+	value = *bool_entry;
+	p += sprintf(p, "%u\n", value);
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+/* Find the entry for tracking the specified interface. */
+static struct iface_stat *get_iface_stat(const char *ifname)
+{
+	unsigned long flags;
+	struct iface_stat *iface_entry;
+	if (!ifname)
+		return NULL;
+
+	spin_lock_irqsave(&iface_stat_list_lock, flags);
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		if (!strcmp(iface_entry->ifname, ifname))
+			goto done;
+	}
+	iface_entry = NULL;
+done:
+	spin_unlock_irqrestore(&iface_stat_list_lock, flags);
+	return iface_entry;
+}
+
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ */
+void iface_stat_create(const struct net_device *net_dev)
+{
+	struct in_device *in_dev;
+	unsigned long flags;
+	struct iface_stat *new_iface;
+	struct proc_dir_entry *proc_entry;
+	const char *ifname;
+	struct iface_stat *entry;
+	__be32 ipaddr = 0;
+	struct in_ifaddr *ifa = NULL;
+
+	ASSERT_RTNL(); /* No need for separate locking */
+
+	pr_debug("iface_stat: create(): netdev=%p->name=%s\n",
+		 net_dev, net_dev ? net_dev->name : "");
+	if (!net_dev) {
+		pr_err("iface_stat: create(): no net dev!\n");
+		return;
+	}
+
+	in_dev = __in_dev_get_rtnl(net_dev);
+	if (!in_dev) {
+		pr_err("iface_stat: create(): no inet dev!\n");
+		return;
+	}
+
+	pr_debug("iface_stat: create(): in_dev=%p\n", in_dev);
+	ifname = net_dev->name;
+	pr_debug("iface_stat: create(): ifname=%p\n", ifname);
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		pr_debug("iface_stat: create(): for(): ifa=%p ifname=%p\n",
+			 ifa, ifname);
+		pr_debug("iface_stat: create(): ifname=%s ifa_label=%s\n",
+			 ifname, ifa->ifa_label ? ifa->ifa_label : "(null)");
+		if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+			break;
+	}
+
+	if (ifa) {
+		ipaddr = ifa->ifa_local;
+	} else {
+		pr_err("iface_stat: create(): dev %s has no matching IP\n",
+		       ifname);
+		return;
+	}
+
+	entry = get_iface_stat(net_dev->name);
+	if (entry != NULL) {
+		pr_debug("iface_stat: create(): dev %s entry=%p\n", ifname,
+			 entry);
+		if (ipv4_is_loopback(ipaddr)) {
+			entry->active = false;
+			pr_debug("iface_stat: create(): disable tracking of "
+				 "loopback dev %s\n", ifname);
+		} else {
+			entry->active = true;
+			pr_debug("iface_stat: create(): enable tracking of "
+				 "dev %s with ip=%pI4\n",
+				 ifname, &ipaddr);
+		}
+		return;
+	} else if (ipv4_is_loopback(ipaddr)) {
+		pr_debug("iface_stat: create(): ignore loopback dev %s"
+			 " ip=%pI4\n", ifname, &ipaddr);
+		return;
+	}
+
+	new_iface = kmalloc(sizeof(*new_iface), GFP_KERNEL);
+	if (new_iface == NULL) {
+		pr_err("iface_stat: create(): failed to alloc iface_stat\n");
+		return;
+	}
+	memset(new_iface, 0, sizeof(*new_iface));
+	new_iface->ifname = kstrdup(ifname, GFP_KERNEL);
+	if (new_iface->ifname == NULL) {
+		pr_err("iface_stat: create(): failed to alloc ifname\n");
+		kfree(new_iface);
+		return;
+	}
+	spin_lock_init(&new_iface->tag_stat_list_lock);
+
+	new_iface->active = true;
+
+	new_iface->tag_stat_tree = RB_ROOT;
+	spin_lock_irqsave(&iface_stat_list_lock, flags);
+	list_add(&new_iface->list, &iface_stat_list);
+	spin_unlock_irqrestore(&iface_stat_list_lock, flags);
+
+	proc_entry = proc_mkdir(ifname, iface_stat_procdir);
+	new_iface->proc_ptr = proc_entry;
+
+	/* TODO: make root access only */
+	create_proc_read_entry("tx_bytes", S_IRUGO, proc_entry,
+			read_proc_u64, &new_iface->tx_bytes);
+	create_proc_read_entry("rx_bytes", S_IRUGO, proc_entry,
+			read_proc_u64, &new_iface->rx_bytes);
+	create_proc_read_entry("tx_packets", S_IRUGO, proc_entry,
+			read_proc_u64, &new_iface->tx_packets);
+	create_proc_read_entry("rx_packets", S_IRUGO, proc_entry,
+			read_proc_u64, &new_iface->rx_packets);
+	create_proc_read_entry("active", S_IRUGO, proc_entry,
+			read_proc_bool, &new_iface->active);
+
+	pr_debug("iface_stat: create(): done entry=%p dev=%s ip=%pI4\n",
+		 new_iface, ifname, &ipaddr);
+}
+
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+	pr_debug("xt_qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+	return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+	unsigned long flags;
+	struct sock_tag *sock_tag_entry;
+	pr_debug("xt_qtaguid: get_sock_stat(sk=%p)\n", sk);
+	if (!sk)
+		return NULL;
+	spin_lock_irqsave(&sock_tag_list_lock, flags);
+	sock_tag_entry = get_sock_stat_nl(sk);
+	spin_unlock_irqrestore(&sock_tag_list_lock, flags);
+	return sock_tag_entry;
+}
+
+static void
+data_counters_update(struct data_counters *dc,  enum ifs_tx_rx direction,
+		int proto, int bytes)
+{
+	switch (proto) {
+	case IPPROTO_TCP:
+		dc_add_byte_packets(dc, direction, IFS_TCP, bytes, 1);
+		break;
+	case IPPROTO_UDP:
+		dc_add_byte_packets(dc, direction, IFS_UDP, bytes, 1);
+		break;
+	case IPPROTO_IP:
+	default:
+		dc_add_byte_packets(dc, direction, IFS_PROTO_OTHER, bytes, 1);
+		break;
+	}
+}
+
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when a device is being unregistered.
+ */
+void iface_stat_update(struct net_device *dev)
+{
+	struct rtnl_link_stats64 dev_stats, *stats;
+	struct iface_stat *entry;
+	stats = dev_get_stats(dev, &dev_stats);
+	ASSERT_RTNL();
+
+	entry = get_iface_stat(dev->name);
+	if (entry == NULL) {
+		pr_debug("iface_stat: dev %s monitor not found\n", dev->name);
+		return;
+	}
+	if (entry->active) {
+		entry->tx_bytes += stats->tx_bytes;
+		entry->tx_packets += stats->tx_packets;
+		entry->rx_bytes += stats->rx_bytes;
+		entry->rx_packets += stats->rx_packets;
+		entry->active = false;
+		pr_debug("iface_stat: Updating stats for "
+			"dev %s which went down\n", dev->name);
+	} else {
+		pr_debug("iface_stat: Did not update stats for "
+			"dev %s which went down\n", dev->name);
+	}
+}
+
+
+static void tag_stat_update(struct tag_stat *tag_entry,
+			enum ifs_tx_rx direction, int proto, int bytes)
+{
+	pr_debug("xt_qtaguid: tag_stat_update(tag=0x%llx (uid=%d) dir=%d "
+		"proto=%d bytes=%d)\n",
+		tag_entry->tag, get_uid_from_tag(tag_entry->tag), direction,
+		proto, bytes);
+	data_counters_update(&tag_entry->counters, direction, proto, bytes);
+	if (tag_entry->parent_counters)
+		data_counters_update(tag_entry->parent_counters, direction,
+				proto, bytes);
+}
+
+
+/* Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held. */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+					   tag_t tag)
+{
+	struct tag_stat *new_tag_stat_entry = NULL;
+	pr_debug("iface_stat: create_if_tag_stat(): ife=%p tag=0x%llx"
+		 " (uid=%d)\n",
+		 iface_entry, tag, get_uid_from_tag(tag));
+	new_tag_stat_entry = kmalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+	if (!new_tag_stat_entry) {
+		pr_err("iface_stat: failed to alloc new tag entry\n");
+		goto done;
+	}
+	memset(new_tag_stat_entry, 0, sizeof(*new_tag_stat_entry));
+	new_tag_stat_entry->tag = tag;
+	tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+	return new_tag_stat_entry;
+}
+
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+	struct iface_stat *iface_entry;
+	unsigned long flags;
+
+	/* Find the entry for tracking the specified interface */
+	if (ifname == NULL) {
+		pr_info("iface_stat: NULL device name\n");
+		return NULL;
+	}
+
+
+	/* Iterate over interfaces */
+	spin_lock_irqsave(&iface_stat_list_lock, flags);
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		if (!strcmp(ifname, iface_entry->ifname))
+			goto done;
+	}
+	iface_entry = NULL;
+done:
+	spin_unlock_irqrestore(&iface_stat_list_lock, flags);
+	return iface_entry;
+}
+
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+			       const struct sock *sk, enum ifs_tx_rx direction,
+			       int proto, int bytes)
+{
+	struct tag_stat *tag_stat_entry;
+	tag_t tag, acct_tag;
+	tag_t uid_tag;
+	struct data_counters *uid_tag_counters;
+	struct sock_tag *sock_tag_entry;
+	struct iface_stat *iface_entry;
+	unsigned long flags;
+	struct tag_stat *new_tag_stat;
+	pr_debug("xt_qtaguid: if_tag_stat_update(ifname=%s "
+		"uid=%d sk=%p dir=%d proto=%d bytes=%d)\n",
+		 ifname, uid, sk, direction, proto, bytes);
+
+
+	iface_entry = get_iface_entry(ifname);
+	if (!iface_entry) {
+		pr_err("iface_stat: interface %s not found\n", ifname);
+		return;
+	}
+	/* If the iface_entry becomes inactive, it is still ok
+	 * to process the data. */
+
+	pr_debug("iface_stat: stat_update() got entry=%p\n", iface_entry);
+
+	/* Look for a tagged sock.
+	 * It will have an acct_uid. */
+	sock_tag_entry = get_sock_stat(sk);
+	if (sock_tag_entry) {
+		tag = sock_tag_entry->tag;
+		acct_tag = get_atag_from_tag(tag);
+		uid_tag = get_utag_from_tag(tag);
+	} else {
+		uid_tag = make_tag_from_uid(uid);
+		acct_tag = 0;
+		tag = combine_atag_with_uid(acct_tag, uid);
+	}
+	pr_debug("iface_stat: stat_update(): looking for tag=0x%llx (uid=%d)"
+		 " in ife=%p\n",
+		 tag, get_uid_from_tag(tag), iface_entry);
+	/* Loop over tag list under this interface for {acct_tag,uid_tag} */
+	spin_lock_irqsave(&iface_entry->tag_stat_list_lock, flags);
+
+	tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+					      tag);
+	if (tag_stat_entry) {
+		/* Updating the {acct_tag, uid_tag} entry handles both stats:
+		 * {0, uid_tag} will also get updated. */
+		tag_stat_update(tag_stat_entry, direction, proto, bytes);
+		spin_unlock_irqrestore(&iface_entry->tag_stat_list_lock, flags);
+		return;
+	}
+
+	/* Loop over tag list under this interface for {0,uid_tag} */
+	tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+					      uid_tag);
+	if (!tag_stat_entry) {
+		/* Here: the base uid_tag did not exist */
+		/*
+		 * No parent counters. So
+		 *  - No {0, uid_tag} stats and no {acct_tag, uid_tag} stats.
+		 */
+		new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+		uid_tag_counters = &new_tag_stat->counters;
+	} else {
+		uid_tag_counters = &tag_stat_entry->counters;
+		/* Ensure new_tag_stat is initialized when acct_tag == 0. */
+		new_tag_stat = tag_stat_entry;
+	}
+
+	if (acct_tag) {
+		new_tag_stat = create_if_tag_stat(iface_entry, tag);
+		new_tag_stat->parent_counters = uid_tag_counters;
+	}
+	spin_unlock_irqrestore(&iface_entry->tag_stat_list_lock, flags);
+	tag_stat_update(new_tag_stat, direction, proto, bytes);
+}
+
+static int iface_netdev_event_handler(struct notifier_block *nb,
+				      unsigned long event, void *ptr) {
+	struct net_device *dev = ptr;
+
+	pr_debug("iface_stat: netdev_event(): ev=0x%lx netdev=%p->name=%s\n",
+		 event, dev, dev ? dev->name : "");
+
+	switch (event) {
+	case NETDEV_UP:
+	case NETDEV_REBOOT:
+	case NETDEV_CHANGE:
+	case NETDEV_REGISTER:  /* Most likely no IP */
+	case NETDEV_CHANGEADDR:  /* MAC addr change */
+	case NETDEV_CHANGENAME:
+	case NETDEV_FEAT_CHANGE:  /* Might be useful when cell type changes */
+		iface_stat_create(dev);
+		break;
+	case NETDEV_UNREGISTER:
+		iface_stat_update(dev);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+					unsigned long event, void *ptr) {
+
+	struct in_ifaddr *ifa = ptr;
+	struct in_device *in_dev = ifa->ifa_dev;
+	struct net_device *dev = in_dev->dev;
+
+	pr_debug("iface_stat: inetaddr_event(): ev=0x%lx netdev=%p->name=%s\n",
+		 event, dev, dev ? dev->name : "");
+
+	switch (event) {
+	case NETDEV_UP:
+		iface_stat_create(dev);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iface_netdev_notifier_blk = {
+	.notifier_call = iface_netdev_event_handler,
+};
+
+static struct notifier_block iface_inetaddr_notifier_blk = {
+	.notifier_call = iface_inetaddr_event_handler,
+};
+
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+	int err;
+
+	iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+	if (!iface_stat_procdir) {
+		pr_err("iface_stat: failed to create proc entry\n");
+		err = -1;
+		goto err;
+	}
+	err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+	if (err) {
+		pr_err("iface_stat: failed to register dev event handler\n");
+		goto err_unreg_nd;
+	}
+	err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+	if (err) {
+		pr_err("iface_stat: failed to register dev event handler\n");
+		goto err_zap_entry;
+	}
+	return 0;
+
+err_unreg_nd:
+	unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_entry:
+	remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+	return err;
+}
+
+static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
+				    struct xt_action_param *par)
+{
+	struct sock *sk;
+	unsigned int hook_mask = (1 << par->hooknum);
+
+	pr_debug("xt_qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
+		 par->hooknum, par->family);
+
+	/* Let's not abuse the xt_socket_get*_sk(), or else it will
+	 * return garbage SKs. */
+	if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
+		return NULL;
+
+	switch (par->family) {
+	case NFPROTO_IPV6:
+		sk = xt_socket_get6_sk(skb, par);
+		break;
+	case NFPROTO_IPV4:
+		sk = xt_socket_get4_sk(skb, par);
+		break;
+	default:
+		return NULL;
+	}
+
+	/* There seem to be issues with the file ptr for TCP_TIME_WAIT SKs.
+	 * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959
+	 * Not fixed in 3.0-r3 :(
+	 */
+	if (sk) {
+		pr_debug("xt_qtaguid: %p->sk_proto=%u "
+			 "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+		if (sk->sk_state  == TCP_TIME_WAIT) {
+			xt_socket_put_sk(sk);
+			sk = NULL;
+		}
+	}
+	return sk;
+}
+
+static void account_for_uid(const struct sk_buff *skb,
+			    const struct sock *alternate_sk, uid_t uid,
+			    struct xt_action_param *par)
+{
+	const struct net_device *el_dev;
+
+	if (!skb->dev) {
+		pr_debug("xt_qtaguid[%d]: no skb->dev\n", par->hooknum);
+		el_dev = par->in ? : par->out;
+	} else {
+		const struct net_device *other_dev;
+		el_dev = skb->dev;
+		other_dev = par->in ? : par->out;
+		if (el_dev != other_dev) {
+			pr_debug("xt_qtaguid[%d]: skb->dev=%p %s vs "
+				"par->(in/out)=%p %s\n",
+				par->hooknum, el_dev, el_dev->name, other_dev,
+				other_dev->name);
+		}
+	}
+
+	if (unlikely(!el_dev)) {
+		pr_info("xt_qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
+	} else if (unlikely(!el_dev->name)) {
+		pr_info("xt_qtaguid[%d]: no dev->name?!!\n", par->hooknum);
+	} else {
+		pr_debug("xt_qtaguid[%d]: dev name=%s type=%d\n",
+			par->hooknum,
+			el_dev->name,
+			el_dev->type);
+
+		if_tag_stat_update(el_dev->name, uid,
+				skb->sk ? skb->sk : alternate_sk,
+				par->in ? IFS_RX : IFS_TX,
+				ip_hdr(skb)->protocol, skb->len);
+	}
+}
+
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_qtaguid_match_info *info = par->matchinfo;
+	const struct file *filp;
+	bool got_sock = false;
+	struct sock *sk;
+	uid_t sock_uid;
+	bool res;
+	pr_debug("xt_qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
+		 par->hooknum, skb, par->in, par->out, par->family);
+	if (skb == NULL) {
+		res = (info->match ^ info->invert) == 0;
+		goto ret_res;
+	}
+
+	sk = skb->sk;
+
+	if (sk == NULL) {
+		/*  A missing sk->sk_socket happens when packets are in-flight
+		 * and the matching socket is already closed and gone.
+		 */
+		sk = qtaguid_find_sk(skb, par);
+		/* If we got the socket from the find_sk(), we will need to put
+		 * it back, as nf_tproxy_get_sock_v4() got it. */
+		got_sock = sk;
+	}
+	pr_debug("xt_qtaguid[%d]: sk=%p got_sock=%d proto=%d\n",
+		par->hooknum, sk, got_sock, ip_hdr(skb)->protocol);
+	if (sk != NULL) {
+		pr_debug("xt_qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+			par->hooknum, sk, sk->sk_socket,
+			sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+		filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+		pr_debug("xt_qtaguid[%d]: filp...uid=%d\n",
+			par->hooknum, filp ? filp->f_cred->fsuid : -1);
+	}
+
+	if (sk == NULL || sk->sk_socket == NULL) {
+		/* Here, the qtaguid_find_sk() using connection tracking
+		 * couldn't find the owner, so for now we just count them
+		 * against the system. */
+		/* TODO: unhack how to force just accounting.
+		 * For now we only do iface stats when the uid-owner is not
+		 * requested */
+		if (!(info->match & XT_QTAGUID_UID))
+			account_for_uid(skb, sk, 0, par);
+		pr_debug("xt_qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
+			par->hooknum,
+			sk ? sk->sk_socket : NULL);
+		res =  (info->match ^ info->invert) == 0;
+		goto put_sock_ret_res;
+	} else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
+		res = false;
+		goto put_sock_ret_res;
+	}
+	filp = sk->sk_socket->file;
+	if (filp == NULL) {
+		pr_debug("xt_qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
+		res = ((info->match ^ info->invert) &
+			(XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
+		goto put_sock_ret_res;
+	}
+	sock_uid = filp->f_cred->fsuid;
+	/* TODO: unhack how to force just accounting.
+	 * For now we only do iface stats when the uid-owner is not requested */
+	if (!(info->match & XT_QTAGUID_UID))
+		account_for_uid(skb, sk, sock_uid, par);
+
+	/* The following two tests fail the match when:
+	 *    id not in range AND no inverted condition requested
+	 * or id     in range AND    inverted condition requested
+	 * Thus (!a && b) || (a && !b) == a ^ b
+	 */
+	if (info->match & XT_QTAGUID_UID)
+		if ((filp->f_cred->fsuid >= info->uid_min &&
+		     filp->f_cred->fsuid <= info->uid_max) ^
+		    !(info->invert & XT_QTAGUID_UID)) {
+			pr_debug("xt_qtaguid[%d]: leaving uid not matching\n",
+				 par->hooknum);
+			res = false;
+			goto put_sock_ret_res;
+		}
+	if (info->match & XT_QTAGUID_GID)
+		if ((filp->f_cred->fsgid >= info->gid_min &&
+				filp->f_cred->fsgid <= info->gid_max) ^
+			!(info->invert & XT_QTAGUID_GID)) {
+			pr_debug("xt_qtaguid[%d]: leaving gid not matching\n",
+				par->hooknum);
+			res = false;
+			goto put_sock_ret_res;
+		}
+
+	pr_debug("xt_qtaguid[%d]: leaving matched\n", par->hooknum);
+	res = true;
+
+put_sock_ret_res:
+	if (got_sock)
+		xt_socket_put_sk(sk);
+ret_res:
+	pr_debug("xt_qtaguid[%d]: left %d\n", par->hooknum, res);
+	return res;
+}
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
+				  off_t items_to_skip, int char_count, int *eof,
+				  void *data)
+{
+	char *outp = page;
+	int len;
+	unsigned long flags;
+	uid_t uid;
+	struct sock_tag *sock_tag_entry;
+	struct rb_node *node;
+	int item_index = 0;
+
+	pr_debug("xt_qtaguid:proc ctrl page=%p off=%ld char_count=%d *eof=%d\n",
+		page, items_to_skip, char_count, *eof);
+
+	if (*eof)
+		return 0;
+
+	spin_lock_irqsave(&sock_tag_list_lock, flags);
+	for (node = rb_first(&sock_tag_tree);
+	     node;
+	     node = rb_next(node)) {
+		if (item_index++ < items_to_skip)
+			continue;
+		sock_tag_entry =  rb_entry(node, struct sock_tag, node);
+		uid = get_uid_from_tag(sock_tag_entry->tag);
+		pr_debug("xt_qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%d)\n",
+			sock_tag_entry->sk,
+			sock_tag_entry->tag,
+			uid);
+		len = snprintf(outp, char_count,
+			       "sock=%p tag=0x%llx (uid=%u)\n",
+			       sock_tag_entry->sk, sock_tag_entry->tag, uid);
+		if (len >= char_count) {
+			spin_unlock_irqrestore(&sock_tag_list_lock, flags);
+			*outp = '\0';
+			return outp - page;
+		}
+		outp += len;
+		char_count -= len;
+		(*num_items_returned)++;
+	}
+	spin_unlock_irqrestore(&sock_tag_list_lock, flags);
+	*eof = 1;
+	return outp - page;
+}
+
+static int qtaguid_ctrl_parse(const char *input, int count)
+{
+	char cmd;
+	int sock_fd = 0;
+	uid_t uid = 0;
+	tag_t acct_tag = 0;
+	struct socket *el_socket;
+	int res, argc;
+	struct sock_tag *sock_tag_entry;
+	unsigned long flags;
+
+	pr_debug("xt_qtaguid: ctrl(%s): entered\n", input);
+	/* Unassigned args will get defaulted later. */
+	/* TODO: get acct_tag_str, keep a list of available tags for the
+	 * uid, use num as acct_tag. */
+	argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
+	pr_debug("xt_qtaguid: ctrl(%s): argc=%d cmd=%c sock_fd=%d "
+		"acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+		acct_tag, uid);
+
+	/* Collect params for commands */
+	switch (cmd) {
+	case 't':
+	case 'u':
+		if (argc < 2) {
+			res = -EINVAL;
+			goto err;
+		}
+		el_socket = sockfd_lookup(sock_fd, &res);
+		if (!el_socket) {
+			pr_info("xt_qtaguid: ctrl(%s): failed to lookup"
+				" sock_fd=%d err=%d\n", input, sock_fd, res);
+			goto err;
+		}
+		spin_lock_irqsave(&sock_tag_list_lock, flags);
+		/* TODO: optim: pass in the current_fsuid() to do lookups
+		 * as lookups will always be initiated from the same uid. */
+		sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+		if (!sock_tag_entry)
+			spin_unlock_irqrestore(&sock_tag_list_lock, flags);
+		/* HERE: The lock is held if there was a matching sock tag entry */
+		break;
+	default:
+		res = -EINVAL;
+		goto err;
+	}
+	/* HERE: The lock is held if there was a matching sock tag entry */
+
+	/* Process commands */
+	switch (cmd) {
+
+	case 't':
+		if (argc < 2) {
+			res = -EINVAL;
+			/* HERE: The lock is held if there was a matching sock
+			 * tag entry */
+			goto err_unlock;
+		}
+		if (argc < 3) {
+			acct_tag = 0;
+		} else if (!valid_atag(acct_tag)) {
+			res = -EINVAL;
+			/* HERE: The lock is held if there was a matching sock
+			 * tag entry */
+			goto err_unlock;
+		}
+		if (argc < 4)
+			uid = current_fsuid();
+		if (!sock_tag_entry) {
+			/* HERE: There is no lock held because there was no
+			 * sock tag entry */
+			sock_tag_entry = kmalloc(sizeof(*sock_tag_entry),
+						GFP_KERNEL);
+			if (!sock_tag_entry) {
+				res = -ENOMEM;
+				goto err;
+			}
+			memset(sock_tag_entry, 0, sizeof(*sock_tag_entry));
+			sock_tag_entry->sk = el_socket->sk;
+			/* TODO: check that uid==current_fsuid() except
+			 * for special uid/gid. */
+			sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
+								uid);
+			spin_lock_irqsave(&sock_tag_list_lock, flags);
+			sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+		} else {
+			/* HERE: The lock is held because there is a matching
+			 * sock tag entry */
+			/* Just update the acct_tag portion. */
+			uid_t orig_uid = get_uid_from_tag(sock_tag_entry->tag);
+			sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
+								orig_uid);
+		}
+		spin_unlock_irqrestore(&sock_tag_list_lock, flags);
+		pr_debug("xt_qtaguid: tag: sock_tag_entry->sk=%p "
+			"...->tag=0x%llx (uid=%u)\n",
+			sock_tag_entry->sk, sock_tag_entry->tag,
+			get_uid_from_tag(sock_tag_entry->tag));
+		break;
+
+	case 'u':
+		if (!sock_tag_entry) {
+			res = -EINVAL;
+			goto err;
+		}
+		/* TODO: check that the uid==current_fsuid()
+		 * except for special uid/gid. */
+		rb_erase(&sock_tag_entry->node, &sock_tag_tree);
+		spin_unlock_irqrestore(&sock_tag_list_lock, flags);
+		kfree(sock_tag_entry);
+		break;
+	}
+
+	/* All of the input has been processed */
+	res = count;
+	goto ok;
+
+err_unlock:
+	if (sock_tag_entry)
+		spin_unlock_irqrestore(&sock_tag_list_lock, flags);
+err:
+ok:
+	pr_debug("xt_qtaguid: ctrl(%s): res=%d\n", input, res);
+	return res;
+}
+
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
+			unsigned long count, void *data)
+{
+	char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+	if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+		return -EINVAL;
+
+	if (copy_from_user(input_buf, buffer, count))
+		return -EFAULT;
+
+	input_buf[count] = '\0';
+	return qtaguid_ctrl_parse(input_buf, count);
+}
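
qtaguid_ctrl_parse() above accepts "t <sock_fd> <acct_tag> <uid>" to tag a socket and "u <sock_fd>" to untag it; the accounting tag must already sit in the upper 32 bits of the 64-bit value (valid_atag()), and the fd is resolved in the writing process via sockfd_lookup(). A hedged userspace sketch, assuming the /proc/net/xt_qtaguid/ctrl path named in the Kconfig help and registered by qtaguid_proc_register() below; the helper names are illustrative:

	/* Illustrative sketch: tag/untag a socket through the qtaguid ctrl file. */
	#include <stdint.h>
	#include <stdio.h>

	static int qtaguid_write_ctrl(const char *cmd)
	{
		FILE *f = fopen("/proc/net/xt_qtaguid/ctrl", "w");
		int ret;

		if (!f)
			return -1;
		ret = fputs(cmd, f);
		return (fclose(f) || ret < 0) ? -1 : 0;
	}

	/* Tag sock_fd with user-visible accounting tag 'acct' on behalf of 'uid'. */
	int qtaguid_tag_socket(int sock_fd, uint32_t acct, uint32_t uid)
	{
		char cmd[64];

		snprintf(cmd, sizeof(cmd), "t %d %llu %u", sock_fd,
			 (unsigned long long)((uint64_t)acct << 32), uid);
		return qtaguid_write_ctrl(cmd);
	}

	int qtaguid_untag_socket(int sock_fd)
	{
		char cmd[32];

		snprintf(cmd, sizeof(cmd), "u %d", sock_fd);
		return qtaguid_write_ctrl(cmd);
	}
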
+
+/*
+ * Procfs reader to get all tag stats using style "1)" as described in
+ * fs/proc/generic.c
+ * Groups all protocols tx/rx bytes.
+ */
+static int qtaguid_stats_proc_read(char *page, char **num_items_returned,
+				off_t items_to_skip, int char_count, int *eof,
+				void *data)
+{
+	char *outp = page;
+	int len;
+	unsigned long flags, flags2;
+	struct iface_stat *iface_entry;
+	struct tag_stat *ts_entry;
+	int item_index = 0;
+
+	/* TODO: make root access only */
+
+	pr_debug("xt_qtaguid:proc stats page=%p *num_items_returned=%p off=%ld "
+		"char_count=%d *eof=%d\n", page, *num_items_returned,
+		items_to_skip, char_count, *eof);
+
+	if (*eof)
+		return 0;
+
+	if (!items_to_skip) {
+		/* The idx is there to help debug when things go belly up. */
+		len = snprintf(outp, char_count,
+			"idx iface acct_tag_hex uid_tag_int rx_bytes "
+			"tx_bytes\n");
+		/* Don't advance the outp unless the whole line was printed */
+		if (len >= char_count) {
+			*outp = '\0';
+			return outp - page;
+		}
+		outp += len;
+		char_count -= len;
+	}
+
+	spin_lock_irqsave(&iface_stat_list_lock, flags);
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		struct rb_node *node;
+		spin_lock_irqsave(&iface_entry->tag_stat_list_lock, flags2);
+		for (node = rb_first(&iface_entry->tag_stat_tree);
+		     node;
+		     node = rb_next(node)) {
+			ts_entry =  rb_entry(node, struct tag_stat, node);
+			if (item_index++ < items_to_skip)
+				continue;
+			len = snprintf(outp, char_count,
+				       "%d %s 0x%llx %u %llu %llu\n",
+				       item_index,
+				       iface_entry->ifname,
+				       get_atag_from_tag(ts_entry->tag),
+				       get_uid_from_tag(ts_entry->tag),
+				       dc_sum_bytes(&ts_entry->counters,
+						    IFS_RX),
+				       dc_sum_bytes(&ts_entry->counters,
+						    IFS_TX));
+			if (len >= char_count) {
+				spin_unlock_irqrestore(
+					&iface_entry->tag_stat_list_lock,
+					flags2);
+				spin_unlock_irqrestore(
+					&iface_stat_list_lock, flags);
+				*outp = '\0';
+				return outp - page;
+			}
+			outp += len;
+			char_count -= len;
+			(*num_items_returned)++;
+		}
+		spin_unlock_irqrestore(&iface_entry->tag_stat_list_lock,
+				flags2);
+	}
+	spin_unlock_irqrestore(&iface_stat_list_lock, flags);
+
+	*eof = 1;
+	return outp - page;
+}
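
The reader above emits one header line followed by one line per {acct_tag, uid_tag} pair in the format "<idx> <iface> <acct_tag_hex> <uid> <rx_bytes> <tx_bytes>". A minimal parsing sketch (illustrative; it assumes only the /proc/net/xt_qtaguid/stats path registered below):

	/* Illustrative sketch: walk the per-tag stats emitted above. */
	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/net/xt_qtaguid/stats", "r");
		char line[256], iface[32];
		uint64_t acct_tag, rx_bytes, tx_bytes;
		unsigned int uid;
		int idx;

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			/* The header line simply fails to parse and is skipped. */
			if (sscanf(line, "%d %31s 0x%" SCNx64 " %u %" SCNu64 " %" SCNu64,
				   &idx, iface, &acct_tag, &uid,
				   &rx_bytes, &tx_bytes) != 6)
				continue;
			printf("uid=%u acct=0x%" PRIx64 " iface=%s rx=%" PRIu64
			       " tx=%" PRIu64 "\n",
			       uid, acct_tag, iface, rx_bytes, tx_bytes);
		}
		fclose(f);
		return 0;
	}
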
+
+/*------------------------------------------*/
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+	int ret;
+	*res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+	if (!*res_procdir) {
+		pr_err("xt_qtaguid: failed to create proc/.../xt_qtaguid\n");
+		ret = -ENOMEM;
+		goto no_dir;
+	}
+
+	xt_qtaguid_ctrl_file = create_proc_entry("ctrl", 0666,
+						*res_procdir);
+	if (!xt_qtaguid_ctrl_file) {
+		pr_err("xt_qtaguid: failed to create xt_qtaguid/ctrl "
+			" file\n");
+		ret = -ENOMEM;
+		goto no_ctrl_entry;
+	}
+	xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read;
+	xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write;
+
+	xt_qtaguid_stats_file = create_proc_entry("stats", 0666,
+						*res_procdir);
+	if (!xt_qtaguid_stats_file) {
+		pr_err("xt_qtaguid: failed to create xt_qtaguid/stats "
+			"file\n");
+		ret = -ENOMEM;
+		goto no_stats_entry;
+	}
+	/*
+	 * TODO: add extra read_proc for full stats with protocol
+	 * breakout
+	 */
+	xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read;
+	/*
+	 * TODO: add support counter hacking
+	 * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+	 */
+	return 0;
+
+no_stats_entry:
+	remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+	remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+	return ret;
+}
+
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+	/*
+	 * This module masquerades as the "owner" module so that iptables
+	 * tools can deal with it.
+	 */
+	.name       = "owner",
+	.revision   = 1,
+	.family     = NFPROTO_UNSPEC,
+	.match      = qtaguid_mt,
+	.matchsize  = sizeof(struct xt_qtaguid_match_info),
+	.me         = THIS_MODULE,
+};
+
+static int __init qtaguid_mt_init(void)
+{
+	if (qtaguid_proc_register(&xt_qtaguid_procdir)
+	    || iface_stat_init(xt_qtaguid_procdir)
+	    || xt_register_match(&qtaguid_mt_reg))
+		return -1;
+	return 0;
+}
+
+/* TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces 'y/n' and never an 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 0000000..454ce10
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,289 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * 	netfilter module to enforce network quotas
+ * 	Sam Johnston <samj@samj.net>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License; either
+ *	version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+
+/**
+ * @lock:	lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+	u_int64_t quota;
+	spinlock_t lock;
+	struct list_head list;
+	atomic_t ref;
+	char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+	struct proc_dir_entry *procfs_entry;
+};
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static unsigned int quota_list_uid   = 0;
+static unsigned int quota_list_gid   = 0;
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
+
+static int quota_proc_read(char *page, char **start, off_t offset,
+                           int count, int *eof, void *data)
+{
+	struct xt_quota_counter *e = data;
+	int ret;
+
+	spin_lock_bh(&e->lock);
+	ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota);
+	spin_unlock_bh(&e->lock);
+	return ret;
+}
+
+static int quota_proc_write(struct file *file, const char __user *input,
+                            unsigned long size, void *data)
+{
+	struct xt_quota_counter *e = data;
+	char buf[sizeof("18446744073709551616")];
+
+	if (size > sizeof(buf) - 1)
+		size = sizeof(buf) - 1;
+	if (copy_from_user(buf, input, size) != 0)
+		return -EFAULT;
+	buf[size] = '\0';
+
+	spin_lock_bh(&e->lock);
+	e->quota = simple_strtoull(buf, NULL, 0);
+	spin_unlock_bh(&e->lock);
+	return size;
+}
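
Each named counter ends up as a per-counter file under the proc directory created in quota_mt2_init() below (proc_mkdir("xt_quota", init_net.proc_net), i.e. /proc/net/xt_quota/<name>); quota_proc_read() prints the remaining (or, with XT_QUOTA_GROW, accumulated) byte count and quota_proc_write() accepts a plain decimal value to reset it. A small sketch, where the counter name "example" is hypothetical:

	/* Illustrative sketch: inspect and reset a named quota2 counter. */
	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/proc/net/xt_quota/example";	/* hypothetical name */
		uint64_t value;
		FILE *f;

		f = fopen(path, "r");
		if (!f || fscanf(f, "%" SCNu64, &value) != 1) {
			if (f)
				fclose(f);
			return 1;
		}
		fclose(f);
		printf("quota counter: %" PRIu64 " bytes\n", value);

		f = fopen(path, "w");		/* reset to 1 MB */
		if (!f)
			return 1;
		fprintf(f, "%llu\n", 1000000ULL);
		return fclose(f) ? 1 : 0;
	}
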
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+	struct xt_quota_counter *e;
+	unsigned int size;
+
+	/* Do not need all the procfs things for anonymous counters. */
+	size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+	e = kmalloc(size, GFP_KERNEL);
+	if (e == NULL)
+		return NULL;
+
+	e->quota = q->quota;
+	spin_lock_init(&e->lock);
+	if (!anon) {
+		INIT_LIST_HEAD(&e->list);
+		atomic_set(&e->ref, 1);
+		strlcpy(e->name, q->name, sizeof(e->name));
+	}
+	return e;
+}
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @name:	name of counter
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+	struct proc_dir_entry *p;
+	struct xt_quota_counter *e = NULL;
+	struct xt_quota_counter *new_e;
+
+	if (*q->name == '\0')
+		return q2_new_counter(q, true);
+
+	/* No need to hold a lock while getting a new counter */
+	new_e = q2_new_counter(q, false);
+	if (new_e == NULL)
+		goto out;
+
+	spin_lock_bh(&counter_list_lock);
+	list_for_each_entry(e, &counter_list, list)
+		if (strcmp(e->name, q->name) == 0) {
+			atomic_inc(&e->ref);
+			spin_unlock_bh(&counter_list_lock);
+			kfree(new_e);
+			pr_debug("xt_quota2: old counter name=%s", e->name);
+			return e;
+		}
+	e = new_e;
+	pr_debug("xt_quota2: new_counter name=%s", e->name);
+	list_add_tail(&e->list, &counter_list);
+	/* The entry having a refcount of 1 is not directly destructible.
+	 * This function has not yet returned the new entry, thus iptables
+	 * has no reference it could use to destroy the entry.
+	 * For another rule to destroy it, this function would first have to
+	 * be re-invoked and acquire a new ref for the same named quota.
+	 * Nobody will access e->procfs_entry either.
+	 * So it is safe to release the lock. */
+	spin_unlock_bh(&counter_list_lock);
+
+	/* create_proc_entry() is not spin_lock happy */
+	p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms,
+	                      proc_xt_quota);
+
+	if (IS_ERR_OR_NULL(p)) {
+		spin_lock_bh(&counter_list_lock);
+		list_del(&e->list);
+		spin_unlock_bh(&counter_list_lock);
+		goto out;
+	}
+	p->data         = e;
+	p->read_proc    = quota_proc_read;
+	p->write_proc   = quota_proc_write;
+	p->uid          = quota_list_uid;
+	p->gid          = quota_list_gid;
+	return e;
+
+ out:
+	kfree(e);
+	return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+	pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+	if (q->flags & ~XT_QUOTA_MASK)
+		return -EINVAL;
+
+	q->name[sizeof(q->name)-1] = '\0';
+	if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+		printk(KERN_ERR "xt_quota.3: illegal name\n");
+		return -EINVAL;
+	}
+
+	q->master = q2_get_counter(q);
+	if (q->master == NULL) {
+		printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+
+	if (*q->name == '\0') {
+		kfree(e);
+		return;
+	}
+
+	spin_lock_bh(&counter_list_lock);
+	if (!atomic_dec_and_test(&e->ref)) {
+		spin_unlock_bh(&counter_list_lock);
+		return;
+	}
+
+	list_del(&e->list);
+	remove_proc_entry(e->name, proc_xt_quota);
+	spin_unlock_bh(&counter_list_lock);
+	kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+	bool ret = q->flags & XT_QUOTA_INVERT;
+
+	spin_lock_bh(&e->lock);
+	if (q->flags & XT_QUOTA_GROW) {
+		/*
+		 * While no_change is pointless in "grow" mode, we will
+		 * implement it here simply to have a consistent behavior.
+		 */
+		if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+			e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+		}
+		ret = true;
+	} else {
+		if (e->quota >= skb->len) {
+			if (!(q->flags & XT_QUOTA_NO_CHANGE))
+				e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+			ret = !ret;
+		} else {
+			/* we do not allow even small packets from now on */
+			e->quota = 0;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+	return ret;
+}
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV4,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV6,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+};
+
+static int __init quota_mt2_init(void)
+{
+	int ret;
+	pr_debug("xt_quota2: init()");
+
+	proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+	if (proc_xt_quota == NULL)
+		return -EACCES;
+
+	ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	if (ret < 0)
+		remove_proc_entry("xt_quota", init_net.proc_net);
+	pr_debug("xt_quota2: init() %d", ret);
+	return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+	xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index fe39f7e..ddf5e05 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,7 +35,7 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
-static void
+void
 xt_socket_put_sk(struct sock *sk)
 {
 	if (sk->sk_state == TCP_TIME_WAIT)
@@ -43,6 +43,7 @@
 	else
 		sock_put(sk);
 }
+EXPORT_SYMBOL(xt_socket_put_sk);
 
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
@@ -101,9 +102,8 @@
 	return 0;
 }
 
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
-	     const struct xt_socket_mtinfo1 *info)
+struct sock*
+xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct udphdr _hdr, *hp = NULL;
@@ -120,7 +120,7 @@
 		hp = skb_header_pointer(skb, ip_hdrlen(skb),
 					sizeof(_hdr), &_hdr);
 		if (hp == NULL)
-			return false;
+			return NULL;
 
 		protocol = iph->protocol;
 		saddr = iph->saddr;
@@ -131,9 +131,9 @@
 	} else if (iph->protocol == IPPROTO_ICMP) {
 		if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
 					&sport, &dport))
-			return false;
+			return NULL;
 	} else {
-		return false;
+		return NULL;
 	}
 
 #ifdef XT_SOCKET_HAVE_CONNTRACK
@@ -157,6 +157,23 @@
 
 	sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
 				   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+
+	pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
+		 protocol, &saddr, ntohs(sport),
+		 &daddr, ntohs(dport),
+		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+
+	return sk;
+}
+EXPORT_SYMBOL(xt_socket_get4_sk);
+
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+	     const struct xt_socket_mtinfo1 *info)
+{
+	struct sock *sk;
+
+	sk = xt_socket_get4_sk(skb, par);
 	if (sk != NULL) {
 		bool wildcard;
 		bool transparent = true;
@@ -179,11 +196,6 @@
 			sk = NULL;
 	}
 
-	pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
-		 protocol, &saddr, ntohs(sport),
-		 &daddr, ntohs(dport),
-		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
 	return (sk != NULL);
 }
 
@@ -253,8 +265,8 @@
 	return 0;
 }
 
-static bool
-socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+struct sock*
+xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct udphdr _hdr, *hp = NULL;
@@ -262,7 +274,6 @@
 	struct in6_addr *daddr, *saddr;
 	__be16 dport, sport;
 	int thoff, tproto;
-	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
 	if (tproto < 0) {
@@ -274,7 +285,7 @@
 		hp = skb_header_pointer(skb, thoff,
 					sizeof(_hdr), &_hdr);
 		if (hp == NULL)
-			return false;
+			return NULL;
 
 		saddr = &iph->saddr;
 		sport = hp->source;
@@ -284,13 +295,30 @@
 	} else if (tproto == IPPROTO_ICMPV6) {
 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
 					 &sport, &dport))
-			return false;
+			return NULL;
 	} else {
-		return false;
+		return NULL;
 	}
 
 	sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
 				   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
+		 "(orig %pI6:%hu) sock %p\n",
+		 tproto, saddr, ntohs(sport),
+		 daddr, ntohs(dport),
+		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+	return sk;
+}
+EXPORT_SYMBOL(xt_socket_get6_sk);
+
+static bool
+socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct sock *sk;
+	const struct xt_socket_mtinfo1 *info;
+
+	info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+	sk = xt_socket_get6_sk(skb, par);
 	if (sk != NULL) {
 		bool wildcard;
 		bool transparent = true;
@@ -313,12 +341,6 @@
 			sk = NULL;
 	}
 
-	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
-		 "(orig %pI6:%hu) sock %p\n",
-		 tproto, saddr, ntohs(sport),
-		 daddr, ntohs(dport),
-		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
 	return (sk != NULL);
 }
 #endif
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe89..8e12c8a 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called rfkill.
 
+config RFKILL_PM
+	bool "Power off on suspend"
+	depends on RFKILL && PM
+	default y
+
 # LED trigger support
 config RFKILL_LEDS
 	bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640..df2dae6 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -769,6 +769,7 @@
 }
 EXPORT_SYMBOL(rfkill_pause_polling);
 
+#ifdef CONFIG_RFKILL_PM
 void rfkill_resume_polling(struct rfkill *rfkill)
 {
 	BUG_ON(!rfkill);
@@ -803,14 +804,17 @@
 
 	return 0;
 }
+#endif
 
 static struct class rfkill_class = {
 	.name		= "rfkill",
 	.dev_release	= rfkill_release,
 	.dev_attrs	= rfkill_dev_attrs,
 	.dev_uevent	= rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
 	.suspend	= rfkill_suspend,
 	.resume		= rfkill_resume,
+#endif
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/security/commoncap.c b/security/commoncap.c
index a93b3b7..1322b6a 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -29,6 +29,10 @@
 #include <linux/securebits.h>
 #include <linux/user_namespace.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
 /*
  * If a non-root user executes a setuid-root binary in
  * !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -83,6 +87,11 @@
 int cap_capable(struct task_struct *tsk, const struct cred *cred,
 		struct user_namespace *targ_ns, int cap, int audit)
 {
+	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+		return 0;
+	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+		return 0;
+
 	for (;;) {
 		/* The creator of the user namespace has all caps. */
 		if (targ_ns != &init_user_ns && targ_ns->creator == cred->user)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b194be0..3b12700 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1256,7 +1256,7 @@
 int snd_soc_resume(struct device *dev)
 {
 	struct snd_soc_card *card = dev_get_drvdata(dev);
-	int i;
+	int i, ac97_control = 0;
 
 	/* AC97 devices might have other drivers hanging off them so
 	 * need to resume immediately.  Other drivers don't have that
@@ -1265,14 +1265,15 @@
 	 */
 	for (i = 0; i < card->num_rtd; i++) {
 		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
-		if (cpu_dai->driver->ac97_control) {
-			dev_dbg(dev, "Resuming AC97 immediately\n");
-			soc_resume_deferred(&card->deferred_resume_work);
-		} else {
-			dev_dbg(dev, "Scheduling resume work\n");
-			if (!schedule_work(&card->deferred_resume_work))
-				dev_err(dev, "resume work item may be lost\n");
-		}
+		ac97_control |= cpu_dai->driver->ac97_control;
+	}
+	if (ac97_control) {
+		dev_dbg(dev, "Resuming AC97 immediately\n");
+		soc_resume_deferred(&card->deferred_resume_work);
+	} else {
+		dev_dbg(dev, "Scheduling resume work\n");
+		if (!schedule_work(&card->deferred_resume_work))
+			dev_err(dev, "resume work item may be lost\n");
 	}
 
 	return 0;