Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4

AU_LINUX_ANDROID_ICS.04.00.04.00.126 is from msm-3.0.
First parent is from google/android-3.4.

* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
  PRNG: Device tree entry for qrng device.
  vidc:1080p: Set video core timeout value for Thumbnail mode
  msm: sps: improve the debugging support in SPS driver
  board-8064 msm: Overlap secure and non secure video firmware heaps.
  msm: clock: Add handoff ops for 7x30 and copper XO clocks
  msm_fb: display: Wait for external vsync before DTV IOMMU unmap
  msm: Fix circular dependency in debug UART settings
  msm: gdsc: Add GDSC regulator driver for msm-copper
  defconfig: Enable Mobicore Driver.
  mobicore: Add mobicore driver.
  mobicore: rename variable to lower case.
  mobicore: rename folder.
  mobicore: add makefiles
  mobicore: initial import of kernel driver
  ASoC: msm: Add SLIMBUS_2_RX CPU DAI
  board-8064-gpio: Update FUNC for EPM SPI CS
  msm_fb: display: Remove chicken bit config during video playback
  mmc: msm_sdcc: enable the sanitize capability
  msm-fb: display: lm2 writeback support on mpq platforms
  msm_fb: display: Disable LVDS phy & pll during panel off
  ...

Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d6dc44c..da8b2dc 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,7 +1,6 @@
 /*
    BlueZ - Bluetooth protocol stack for Linux
-   Copyright (C) 2000-2001 Qualcomm Incorporated
-   Copyright (C) 2011 ProFUSION Embedded Systems
+   Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum.  All rights reserved.
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
@@ -40,11 +39,13 @@
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+#include <linux/notifier.h>
 #include <linux/rfkill.h>
 #include <linux/timer.h>
 #include <linux/crypto.h>
 #include <net/sock.h>
 
+#include <asm/system.h>
 #include <linux/uaccess.h>
 #include <asm/unaligned.h>
 
@@ -53,9 +54,13 @@
 
 #define AUTO_OFF_TIMEOUT 2000
 
-static void hci_rx_work(struct work_struct *work);
-static void hci_cmd_work(struct work_struct *work);
-static void hci_tx_work(struct work_struct *work);
+static void hci_cmd_task(unsigned long arg);
+static void hci_rx_task(unsigned long arg);
+static void hci_tx_task(unsigned long arg);
+
+static DEFINE_RWLOCK(hci_task_lock);
+
+static bool enable_smp = 1;
 
 /* HCI device list */
 LIST_HEAD(hci_dev_list);
@@ -65,11 +70,32 @@
 LIST_HEAD(hci_cb_list);
 DEFINE_RWLOCK(hci_cb_list_lock);
 
+/* AMP Manager event callbacks */
+LIST_HEAD(amp_mgr_cb_list);
+DEFINE_RWLOCK(amp_mgr_cb_list_lock);
+
+/* HCI protocols */
+#define HCI_MAX_PROTO	2
+struct hci_proto *hci_proto[HCI_MAX_PROTO];
+
+/* HCI notifiers list */
+static ATOMIC_NOTIFIER_HEAD(hci_notifier);
+
 /* ---- HCI notifications ---- */
 
+int hci_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&hci_notifier, nb);
+}
+
+int hci_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&hci_notifier, nb);
+}
+
 static void hci_notify(struct hci_dev *hdev, int event)
 {
-	hci_sock_dev_event(hdev, event);
+	atomic_notifier_call_chain(&hci_notifier, event, hdev);
 }
 
 /* ---- HCI requests ---- */
@@ -81,28 +107,8 @@
 	/* If this is the init phase check if the completed command matches
 	 * the last init command, and if not just return.
 	 */
-	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
-		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
-		struct sk_buff *skb;
-
-		/* Some CSR based controllers generate a spontaneous
-		 * reset complete event during init and any pending
-		 * command will never be completed. In such a case we
-		 * need to resend whatever was the last sent
-		 * command.
-		 */
-
-		if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
-			return;
-
-		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
-		if (skb) {
-			skb_queue_head(&hdev->cmd_q, skb);
-			queue_work(hdev->workqueue, &hdev->cmd_work);
-		}
-
+	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
 		return;
-	}
 
 	if (hdev->req_status == HCI_REQ_PEND) {
 		hdev->req_result = result;
@@ -146,7 +152,7 @@
 
 	switch (hdev->req_status) {
 	case HCI_REQ_DONE:
-		err = -bt_to_errno(hdev->req_result);
+		err = -bt_err(hdev->req_result);
 		break;
 
 	case HCI_REQ_CANCELED:
@@ -187,75 +193,16 @@
 
 	/* Reset device */
 	set_bit(HCI_RESET, &hdev->flags);
+	memset(&hdev->features, 0, sizeof(hdev->features));
 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 }
 
-static void bredr_init(struct hci_dev *hdev)
-{
-	struct hci_cp_delete_stored_link_key cp;
-	__le16 param;
-	__u8 flt_type;
-
-	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
-
-	/* Mandatory initialization */
-
-	/* Reset */
-	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
-		set_bit(HCI_RESET, &hdev->flags);
-		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
-	}
-
-	/* Read Local Supported Features */
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
-	/* Read Local Version */
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
-	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
-	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-
-	/* Read BD Address */
-	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
-
-	/* Read Class of Device */
-	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
-
-	/* Read Local Name */
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-
-	/* Read Voice Setting */
-	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
-
-	/* Optional initialization */
-
-	/* Clear Event Filters */
-	flt_type = HCI_FLT_CLEAR_ALL;
-	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
-
-	/* Connection accept timeout ~20 secs */
-	param = cpu_to_le16(0x7d00);
-	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-
-	bacpy(&cp.bdaddr, BDADDR_ANY);
-	cp.delete_all = 1;
-	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
-}
-
-static void amp_init(struct hci_dev *hdev)
-{
-	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
-
-	/* Reset */
-	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
-
-	/* Read Local Version */
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-}
-
 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
 {
+	struct hci_cp_delete_stored_link_key cp;
 	struct sk_buff *skb;
+	__le16 param;
+	__u8 flt_type;
 
 	BT_DBG("%s %ld", hdev->name, opt);
 
@@ -267,24 +214,89 @@
 		skb->dev = (void *) hdev;
 
 		skb_queue_tail(&hdev->cmd_q, skb);
-		queue_work(hdev->workqueue, &hdev->cmd_work);
+		tasklet_schedule(&hdev->cmd_task);
 	}
 	skb_queue_purge(&hdev->driver_init);
 
-	switch (hdev->dev_type) {
-	case HCI_BREDR:
-		bredr_init(hdev);
-		break;
+	/* Mandatory initialization */
 
-	case HCI_AMP:
-		amp_init(hdev);
-		break;
-
-	default:
-		BT_ERR("Unknown device type %d", hdev->dev_type);
-		break;
+	/* Reset */
+	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
+			set_bit(HCI_RESET, &hdev->flags);
+			hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 	}
 
+	/* Read Local Version */
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+
+	/* Set default HCI Flow Control Mode */
+	if (hdev->dev_type == HCI_BREDR)
+		hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
+	else
+		hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
+
+	/* Read HCI Flow Control Mode */
+	hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
+
+	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
+	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
+
+	/* Read Data Block Size (ACL mtu, max pkt, etc.) */
+	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+
+#if 0
+	/* Host buffer size */
+	{
+		struct hci_cp_host_buffer_size cp;
+		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
+		cp.sco_mtu = HCI_MAX_SCO_SIZE;
+		cp.acl_max_pkt = cpu_to_le16(0xffff);
+		cp.sco_max_pkt = cpu_to_le16(0xffff);
+		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
+	}
+#endif
+
+	if (hdev->dev_type == HCI_BREDR) {
+		/* BR-EDR initialization */
+
+		/* Read Local Supported Features */
+		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
+		/* Read BD Address */
+		hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
+
+		/* Read Class of Device */
+		hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
+
+		/* Read Local Name */
+		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+
+		/* Read Voice Setting */
+		hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+
+		/* Optional initialization */
+		/* Clear Event Filters */
+		flt_type = HCI_FLT_CLEAR_ALL;
+		hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
+
+		/* Connection accept timeout ~20 secs */
+		param = cpu_to_le16(0x7d00);
+		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+		bacpy(&cp.bdaddr, BDADDR_ANY);
+		cp.delete_all = 1;
+		hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
+				sizeof(cp), &cp);
+	} else {
+		/* AMP initialization */
+		/* Connection accept timeout ~5 secs */
+		param = cpu_to_le16(0x1f40);
+		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+		/* Read AMP Info */
+		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+	}
 }
 
 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -339,7 +351,8 @@
  * Device is held on return. */
 struct hci_dev *hci_dev_get(int index)
 {
-	struct hci_dev *hdev = NULL, *d;
+	struct hci_dev *hdev = NULL;
+	struct list_head *p;
 
 	BT_DBG("%d", index);
 
@@ -347,7 +360,8 @@
 		return NULL;
 
 	read_lock(&hci_dev_list_lock);
-	list_for_each_entry(d, &hci_dev_list, list) {
+	list_for_each(p, &hci_dev_list) {
+		struct hci_dev *d = list_entry(p, struct hci_dev, list);
 		if (d->id == index) {
 			hdev = hci_dev_hold(d);
 			break;
@@ -356,211 +370,75 @@
 	read_unlock(&hci_dev_list_lock);
 	return hdev;
 }
+EXPORT_SYMBOL(hci_dev_get);
 
 /* ---- Inquiry support ---- */
-
-bool hci_discovery_active(struct hci_dev *hdev)
-{
-	struct discovery_state *discov = &hdev->discovery;
-
-	switch (discov->state) {
-	case DISCOVERY_FINDING:
-	case DISCOVERY_RESOLVING:
-		return true;
-
-	default:
-		return false;
-	}
-}
-
-void hci_discovery_set_state(struct hci_dev *hdev, int state)
-{
-	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
-
-	if (hdev->discovery.state == state)
-		return;
-
-	switch (state) {
-	case DISCOVERY_STOPPED:
-		if (hdev->discovery.state != DISCOVERY_STARTING)
-			mgmt_discovering(hdev, 0);
-		hdev->discovery.type = 0;
-		break;
-	case DISCOVERY_STARTING:
-		break;
-	case DISCOVERY_FINDING:
-		mgmt_discovering(hdev, 1);
-		break;
-	case DISCOVERY_RESOLVING:
-		break;
-	case DISCOVERY_STOPPING:
-		break;
-	}
-
-	hdev->discovery.state = state;
-}
-
 static void inquiry_cache_flush(struct hci_dev *hdev)
 {
-	struct discovery_state *cache = &hdev->discovery;
-	struct inquiry_entry *p, *n;
+	struct inquiry_cache *cache = &hdev->inq_cache;
+	struct inquiry_entry *next  = cache->list, *e;
 
-	list_for_each_entry_safe(p, n, &cache->all, all) {
-		list_del(&p->all);
-		kfree(p);
+	BT_DBG("cache %p", cache);
+
+	cache->list = NULL;
+	while ((e = next)) {
+		next = e->next;
+		kfree(e);
 	}
-
-	INIT_LIST_HEAD(&cache->unknown);
-	INIT_LIST_HEAD(&cache->resolve);
 }
 
 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	struct discovery_state *cache = &hdev->discovery;
+	struct inquiry_cache *cache = &hdev->inq_cache;
 	struct inquiry_entry *e;
 
 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
 
-	list_for_each_entry(e, &cache->all, all) {
+	for (e = cache->list; e; e = e->next)
 		if (!bacmp(&e->data.bdaddr, bdaddr))
-			return e;
-	}
-
-	return NULL;
-}
-
-struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
-						       bdaddr_t *bdaddr)
-{
-	struct discovery_state *cache = &hdev->discovery;
-	struct inquiry_entry *e;
-
-	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
-
-	list_for_each_entry(e, &cache->unknown, list) {
-		if (!bacmp(&e->data.bdaddr, bdaddr))
-			return e;
-	}
-
-	return NULL;
-}
-
-struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
-						       bdaddr_t *bdaddr,
-						       int state)
-{
-	struct discovery_state *cache = &hdev->discovery;
-	struct inquiry_entry *e;
-
-	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
-
-	list_for_each_entry(e, &cache->resolve, list) {
-		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
-			return e;
-		if (!bacmp(&e->data.bdaddr, bdaddr))
-			return e;
-	}
-
-	return NULL;
-}
-
-void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
-				      struct inquiry_entry *ie)
-{
-	struct discovery_state *cache = &hdev->discovery;
-	struct list_head *pos = &cache->resolve;
-	struct inquiry_entry *p;
-
-	list_del(&ie->list);
-
-	list_for_each_entry(p, &cache->resolve, list) {
-		if (p->name_state != NAME_PENDING &&
-				abs(p->data.rssi) >= abs(ie->data.rssi))
 			break;
-		pos = &p->list;
-	}
-
-	list_add(&ie->list, pos);
+	return e;
 }
 
-bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
-			      bool name_known, bool *ssp)
+void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
 {
-	struct discovery_state *cache = &hdev->discovery;
+	struct inquiry_cache *cache = &hdev->inq_cache;
 	struct inquiry_entry *ie;
 
 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
 
-	if (ssp)
-		*ssp = data->ssp_mode;
-
 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
-	if (ie) {
-		if (ie->data.ssp_mode && ssp)
-			*ssp = true;
+	if (!ie) {
+		/* Entry not in the cache. Add new one. */
+		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+		if (!ie)
+			return;
 
-		if (ie->name_state == NAME_NEEDED &&
-						data->rssi != ie->data.rssi) {
-			ie->data.rssi = data->rssi;
-			hci_inquiry_cache_update_resolve(hdev, ie);
-		}
-
-		goto update;
-	}
-
-	/* Entry not in the cache. Add new one. */
-	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
-	if (!ie)
-		return false;
-
-	list_add(&ie->all, &cache->all);
-
-	if (name_known) {
-		ie->name_state = NAME_KNOWN;
-	} else {
-		ie->name_state = NAME_NOT_KNOWN;
-		list_add(&ie->list, &cache->unknown);
-	}
-
-update:
-	if (name_known && ie->name_state != NAME_KNOWN &&
-					ie->name_state != NAME_PENDING) {
-		ie->name_state = NAME_KNOWN;
-		list_del(&ie->list);
+		ie->next = cache->list;
+		cache->list = ie;
 	}
 
 	memcpy(&ie->data, data, sizeof(*data));
 	ie->timestamp = jiffies;
 	cache->timestamp = jiffies;
-
-	if (ie->name_state == NAME_NOT_KNOWN)
-		return false;
-
-	return true;
 }
 
 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
 {
-	struct discovery_state *cache = &hdev->discovery;
+	struct inquiry_cache *cache = &hdev->inq_cache;
 	struct inquiry_info *info = (struct inquiry_info *) buf;
 	struct inquiry_entry *e;
 	int copied = 0;
 
-	list_for_each_entry(e, &cache->all, all) {
+	for (e = cache->list; e && copied < num; e = e->next, copied++) {
 		struct inquiry_data *data = &e->data;
-
-		if (copied >= num)
-			break;
-
 		bacpy(&info->bdaddr, &data->bdaddr);
 		info->pscan_rep_mode	= data->pscan_rep_mode;
 		info->pscan_period_mode	= data->pscan_period_mode;
 		info->pscan_mode	= data->pscan_mode;
 		memcpy(info->dev_class, data->dev_class, 3);
 		info->clock_offset	= data->clock_offset;
-
 		info++;
-		copied++;
 	}
 
 	BT_DBG("cache %p, copied %d", cache, copied);
@@ -600,14 +478,14 @@
 	if (!hdev)
 		return -ENODEV;
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
 				inquiry_cache_empty(hdev) ||
 				ir.flags & IREQ_CACHE_FLUSH) {
 		inquiry_cache_flush(hdev);
 		do_inquiry = 1;
 	}
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 
 	timeo = ir.length * msecs_to_jiffies(2000);
 
@@ -629,9 +507,9 @@
 		goto done;
 	}
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 
 	BT_DBG("num_rsp %d", ir.num_rsp);
 
@@ -665,11 +543,6 @@
 
 	hci_req_lock(hdev);
 
-	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
-		ret = -ENODEV;
-		goto done;
-	}
-
 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
 		ret = -ERFKILL;
 		goto done;
@@ -683,16 +556,24 @@
 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
 		set_bit(HCI_RAW, &hdev->flags);
 
-	/* Treat all non BR/EDR controllers as raw devices if
-	   enable_hs is not set */
-	if (hdev->dev_type != HCI_BREDR && !enable_hs)
-		set_bit(HCI_RAW, &hdev->flags);
-
 	if (hdev->open(hdev)) {
 		ret = -EIO;
 		goto done;
 	}
 
+	if (!skb_queue_empty(&hdev->cmd_q)) {
+		BT_ERR("command queue is not empty, purging");
+		skb_queue_purge(&hdev->cmd_q);
+	}
+	if (!skb_queue_empty(&hdev->rx_q)) {
+		BT_ERR("rx queue is not empty, purging");
+		skb_queue_purge(&hdev->rx_q);
+	}
+	if (!skb_queue_empty(&hdev->raw_q)) {
+		BT_ERR("raw queue is not empty, purging");
+		skb_queue_purge(&hdev->raw_q);
+	}
+
 	if (!test_bit(HCI_RAW, &hdev->flags)) {
 		atomic_set(&hdev->cmd_cnt, 1);
 		set_bit(HCI_INIT, &hdev->flags);
@@ -701,7 +582,7 @@
 		ret = __hci_request(hdev, hci_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
-		if (lmp_host_le_capable(hdev))
+		if (lmp_le_capable(hdev))
 			ret = __hci_request(hdev, hci_le_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
@@ -712,16 +593,17 @@
 		hci_dev_hold(hdev);
 		set_bit(HCI_UP, &hdev->flags);
 		hci_notify(hdev, HCI_DEV_UP);
-		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
-			hci_dev_lock(hdev);
-			mgmt_powered(hdev, 1);
-			hci_dev_unlock(hdev);
+		if (!test_bit(HCI_SETUP, &hdev->flags) &&
+				hdev->dev_type == HCI_BREDR) {
+			hci_dev_lock_bh(hdev);
+			mgmt_powered(hdev->id, 1);
+			hci_dev_unlock_bh(hdev);
 		}
 	} else {
 		/* Init failed, cleanup */
-		flush_work(&hdev->tx_work);
-		flush_work(&hdev->cmd_work);
-		flush_work(&hdev->rx_work);
+		tasklet_kill(&hdev->rx_task);
+		tasklet_kill(&hdev->tx_task);
+		tasklet_kill(&hdev->cmd_task);
 
 		skb_queue_purge(&hdev->cmd_q);
 		skb_queue_purge(&hdev->rx_q);
@@ -744,11 +626,11 @@
 	return ret;
 }
 
-static int hci_dev_do_close(struct hci_dev *hdev)
+static int hci_dev_do_close(struct hci_dev *hdev, u8 is_process)
 {
-	BT_DBG("%s %p", hdev->name, hdev);
+	unsigned long keepflags = 0;
 
-	cancel_work_sync(&hdev->le_scan);
+	BT_DBG("%s %p", hdev->name, hdev);
 
 	hci_req_cancel(hdev, ENODEV);
 	hci_req_lock(hdev);
@@ -759,44 +641,38 @@
 		return 0;
 	}
 
-	/* Flush RX and TX works */
-	flush_work(&hdev->tx_work);
-	flush_work(&hdev->rx_work);
+	/* Kill RX and TX tasks */
+	tasklet_kill(&hdev->rx_task);
+	tasklet_kill(&hdev->tx_task);
 
-	if (hdev->discov_timeout > 0) {
-		cancel_delayed_work(&hdev->discov_off);
-		hdev->discov_timeout = 0;
-		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-	}
-
-	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
-		cancel_delayed_work(&hdev->service_cache);
-
-	cancel_delayed_work_sync(&hdev->le_scan_disable);
-
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 	inquiry_cache_flush(hdev);
-	hci_conn_hash_flush(hdev);
-	hci_dev_unlock(hdev);
+	hci_conn_hash_flush(hdev, is_process);
+	hci_dev_unlock_bh(hdev);
 
 	hci_notify(hdev, HCI_DEV_DOWN);
 
+	if (hdev->dev_type == HCI_BREDR) {
+		hci_dev_lock_bh(hdev);
+		mgmt_powered(hdev->id, 0);
+		hci_dev_unlock_bh(hdev);
+	}
+
 	if (hdev->flush)
 		hdev->flush(hdev);
 
 	/* Reset device */
 	skb_queue_purge(&hdev->cmd_q);
 	atomic_set(&hdev->cmd_cnt, 1);
-	if (!test_bit(HCI_RAW, &hdev->flags) &&
-				test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
+	if (!test_bit(HCI_RAW, &hdev->flags)) {
 		set_bit(HCI_INIT, &hdev->flags);
 		__hci_request(hdev, hci_reset_req, 0,
 					msecs_to_jiffies(250));
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
 
-	/* flush cmd  work */
-	flush_work(&hdev->cmd_work);
+	/* Kill cmd task */
+	tasklet_kill(&hdev->cmd_task);
 
 	/* Drop queues */
 	skb_queue_purge(&hdev->rx_q);
@@ -814,17 +690,15 @@
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
 
-	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
-		hci_dev_lock(hdev);
-		mgmt_powered(hdev, 0);
-		hci_dev_unlock(hdev);
-	}
+	/* Clear only non-persistent flags */
+	if (test_bit(HCI_MGMT, &hdev->flags))
+		set_bit(HCI_MGMT, &keepflags);
+	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+		set_bit(HCI_LINK_KEYS, &keepflags);
+	if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
+		set_bit(HCI_DEBUG_KEYS, &keepflags);
 
-	/* Clear flags */
-	hdev->flags = 0;
-
-	memset(hdev->eir, 0, sizeof(hdev->eir));
-	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+	hdev->flags = keepflags;
 
 	hci_req_unlock(hdev);
 
@@ -840,12 +714,7 @@
 	hdev = hci_dev_get(dev);
 	if (!hdev)
 		return -ENODEV;
-
-	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-		cancel_delayed_work(&hdev->power_off);
-
-	err = hci_dev_do_close(hdev);
-
+	err = hci_dev_do_close(hdev, 1);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -860,6 +729,7 @@
 		return -ENODEV;
 
 	hci_req_lock(hdev);
+	tasklet_disable(&hdev->tx_task);
 
 	if (!test_bit(HCI_UP, &hdev->flags))
 		goto done;
@@ -868,10 +738,10 @@
 	skb_queue_purge(&hdev->rx_q);
 	skb_queue_purge(&hdev->cmd_q);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 	inquiry_cache_flush(hdev);
-	hci_conn_hash_flush(hdev);
-	hci_dev_unlock(hdev);
+	hci_conn_hash_flush(hdev, 0);
+	hci_dev_unlock_bh(hdev);
 
 	if (hdev->flush)
 		hdev->flush(hdev);
@@ -884,6 +754,7 @@
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
 done:
+	tasklet_enable(&hdev->tx_task);
 	hci_req_unlock(hdev);
 	hci_dev_put(hdev);
 	return ret;
@@ -982,9 +853,9 @@
 
 int hci_get_dev_list(void __user *arg)
 {
-	struct hci_dev *hdev;
 	struct hci_dev_list_req *dl;
 	struct hci_dev_req *dr;
+	struct list_head *p;
 	int n = 0, size, err;
 	__u16 dev_num;
 
@@ -1002,13 +873,16 @@
 
 	dr = dl->dev_req;
 
-	read_lock(&hci_dev_list_lock);
-	list_for_each_entry(hdev, &hci_dev_list, list) {
-		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-			cancel_delayed_work(&hdev->power_off);
+	read_lock_bh(&hci_dev_list_lock);
+	list_for_each(p, &hci_dev_list) {
+		struct hci_dev *hdev;
 
-		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
-			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+		hdev = list_entry(p, struct hci_dev, list);
+
+		hci_del_off_timer(hdev);
+
+		if (!test_bit(HCI_MGMT, &hdev->flags))
+			set_bit(HCI_PAIRABLE, &hdev->flags);
 
 		(dr + n)->dev_id  = hdev->id;
 		(dr + n)->dev_opt = hdev->flags;
@@ -1016,7 +890,7 @@
 		if (++n >= dev_num)
 			break;
 	}
-	read_unlock(&hci_dev_list_lock);
+	read_unlock_bh(&hci_dev_list_lock);
 
 	dl->dev_num = n;
 	size = sizeof(*dl) + n * sizeof(*dr);
@@ -1040,11 +914,10 @@
 	if (!hdev)
 		return -ENODEV;
 
-	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-		cancel_delayed_work_sync(&hdev->power_off);
+	hci_del_off_timer(hdev);
 
-	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
-		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+	if (!test_bit(HCI_MGMT, &hdev->flags))
+		set_bit(HCI_PAIRABLE, &hdev->flags);
 
 	strcpy(di.name, hdev->name);
 	di.bdaddr   = hdev->bdaddr;
@@ -1080,7 +953,7 @@
 	if (!blocked)
 		return 0;
 
-	hci_dev_do_close(hdev);
+	hci_dev_do_close(hdev, 0);
 
 	return 0;
 }
@@ -1098,7 +971,6 @@
 	if (!hdev)
 		return NULL;
 
-	hci_init_sysfs(hdev);
 	skb_queue_head_init(&hdev->driver_init);
 
 	return hdev;
@@ -1118,46 +990,50 @@
 static void hci_power_on(struct work_struct *work)
 {
 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
+	int err;
 
 	BT_DBG("%s", hdev->name);
 
-	if (hci_dev_open(hdev->id) < 0)
+	err = hci_dev_open(hdev->id);
+	if (err && err != -EALREADY)
 		return;
 
-	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-		schedule_delayed_work(&hdev->power_off,
-					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+	if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
+				hdev->dev_type == HCI_BREDR)
+		mod_timer(&hdev->off_timer,
+				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
 
-	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
-		mgmt_index_added(hdev);
+	if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
+				hdev->dev_type == HCI_BREDR)
+		mgmt_index_added(hdev->id);
 }
 
 static void hci_power_off(struct work_struct *work)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev,
-							power_off.work);
+	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
 
 	BT_DBG("%s", hdev->name);
 
-	hci_dev_do_close(hdev);
+	hci_dev_close(hdev->id);
 }
 
-static void hci_discov_off(struct work_struct *work)
+static void hci_auto_off(unsigned long data)
 {
-	struct hci_dev *hdev;
-	u8 scan = SCAN_PAGE;
-
-	hdev = container_of(work, struct hci_dev, discov_off.work);
+	struct hci_dev *hdev = (struct hci_dev *) data;
 
 	BT_DBG("%s", hdev->name);
 
-	hci_dev_lock(hdev);
+	clear_bit(HCI_AUTO_OFF, &hdev->flags);
 
-	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+	queue_work(hdev->workqueue, &hdev->power_off);
+}
 
-	hdev->discov_timeout = 0;
+void hci_del_off_timer(struct hci_dev *hdev)
+{
+	BT_DBG("%s", hdev->name);
 
-	hci_dev_unlock(hdev);
+	clear_bit(HCI_AUTO_OFF, &hdev->flags);
+	del_timer(&hdev->off_timer);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1192,108 +1068,80 @@
 	return 0;
 }
 
-int hci_smp_ltks_clear(struct hci_dev *hdev)
-{
-	struct smp_ltk *k, *tmp;
-
-	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
-		list_del(&k->list);
-		kfree(k);
-	}
-
-	return 0;
-}
-
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	struct link_key *k;
+	struct list_head *p;
 
-	list_for_each_entry(k, &hdev->link_keys, list)
+	list_for_each(p, &hdev->link_keys) {
+		struct link_key *k;
+
+		k = list_entry(p, struct link_key, list);
+
 		if (bacmp(bdaddr, &k->bdaddr) == 0)
 			return k;
+	}
 
 	return NULL;
 }
 
-static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
-						u8 key_type, u8 old_key_type)
+struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
 {
-	/* Legacy key */
-	if (key_type < 0x03)
-		return true;
+	struct list_head *p;
 
-	/* Debug keys are insecure so don't store them persistently */
-	if (key_type == HCI_LK_DEBUG_COMBINATION)
-		return false;
+	list_for_each(p, &hdev->link_keys) {
+		struct link_key *k;
+		struct key_master_id *id;
 
-	/* Changed combination key and there's no previous one */
-	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
-		return false;
+		k = list_entry(p, struct link_key, list);
 
-	/* Security mode 3 case */
-	if (!conn)
-		return true;
-
-	/* Neither local nor remote side had no-bonding as requirement */
-	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
-		return true;
-
-	/* Local side had dedicated bonding as requirement */
-	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
-		return true;
-
-	/* Remote side had dedicated bonding as requirement */
-	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
-		return true;
-
-	/* If none of the above criteria match, then don't store the key
-	 * persistently */
-	return false;
-}
-
-struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
-{
-	struct smp_ltk *k;
-
-	list_for_each_entry(k, &hdev->long_term_keys, list) {
-		if (k->ediv != ediv ||
-				memcmp(rand, k->rand, sizeof(k->rand)))
+		if (k->key_type != KEY_TYPE_LTK)
 			continue;
 
-		return k;
+		if (k->dlen != sizeof(*id))
+			continue;
+
+		id = (void *) &k->data;
+		if (id->ediv == ediv &&
+				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
+			return k;
 	}
 
 	return NULL;
 }
 EXPORT_SYMBOL(hci_find_ltk);
 
-struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				     u8 addr_type)
+struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
+					bdaddr_t *bdaddr, u8 type)
 {
-	struct smp_ltk *k;
+	struct list_head *p;
 
-	list_for_each_entry(k, &hdev->long_term_keys, list)
-		if (addr_type == k->bdaddr_type &&
-					bacmp(bdaddr, &k->bdaddr) == 0)
+	list_for_each(p, &hdev->link_keys) {
+		struct link_key *k;
+
+		k = list_entry(p, struct link_key, list);
+
+		if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
 			return k;
+	}
 
 	return NULL;
 }
-EXPORT_SYMBOL(hci_find_ltk_by_addr);
+EXPORT_SYMBOL(hci_find_link_key_type);
 
-int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
-		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+						u8 *val, u8 type, u8 pin_len)
 {
 	struct link_key *key, *old_key;
+	struct hci_conn *conn;
 	u8 old_key_type;
-	bool persistent;
+	u8 bonded = 0;
 
 	old_key = hci_find_link_key(hdev, bdaddr);
 	if (old_key) {
-		old_key_type = old_key->type;
+		old_key_type = old_key->key_type;
 		key = old_key;
 	} else {
-		old_key_type = conn ? conn->key_type : 0xff;
+		old_key_type = 0xff;
 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
 		if (!key)
 			return -ENOMEM;
@@ -1302,72 +1150,76 @@
 
 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
 
-	/* Some buggy controller combinations generate a changed
-	 * combination key for legacy pairing even when there's no
-	 * previous key */
-	if (type == HCI_LK_CHANGED_COMBINATION &&
-					(!conn || conn->remote_auth == 0xff) &&
-					old_key_type == 0xff) {
-		type = HCI_LK_COMBINATION;
-		if (conn)
-			conn->key_type = type;
-	}
-
 	bacpy(&key->bdaddr, bdaddr);
 	memcpy(key->val, val, 16);
+	key->auth = 0x01;
+	key->key_type = type;
 	key->pin_len = pin_len;
 
-	if (type == HCI_LK_CHANGED_COMBINATION)
-		key->type = old_key_type;
-	else
-		key->type = type;
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+	/* Store the link key persistently if one of the following is true:
+	 * 1. the remote side is using dedicated bonding since in that case
+	 *    also the local requirements are set to dedicated bonding
+	 * 2. the local side had dedicated bonding as a requirement
+	 * 3. this is a legacy link key
+	 * 4. this is a changed combination key and there was a previously
+	 *    stored one
+	 * If none of the above match only keep the link key around for
+	 * this connection and set the temporary flag for the device.
+	*/
 
-	if (!new_key)
-		return 0;
+	if (conn) {
+		if ((conn->remote_auth > 0x01) ||
+			(conn->auth_initiator && conn->auth_type > 0x01) ||
+			(key->key_type < 0x03) ||
+			(key->key_type == 0x06 && old_key_type != 0xff))
+			bonded = 1;
+	}
 
-	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
+	if (new_key)
+		mgmt_new_key(hdev->id, key, bonded);
 
-	mgmt_new_link_key(hdev, key, persistent);
-
-	if (conn)
-		conn->flush_key = !persistent;
+	if (type == 0x06)
+		key->key_type = old_key_type;
 
 	return 0;
 }
 
-int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
-		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
-		ediv, u8 rand[8])
+int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+			u8 addr_type, u8 key_size, u8 auth,
+			__le16 ediv, u8 rand[8], u8 ltk[16])
 {
-	struct smp_ltk *key, *old_key;
+	struct link_key *key, *old_key;
+	struct key_master_id *id;
 
-	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
-		return 0;
+	BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
+						batostr(bdaddr), addr_type);
 
-	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
-	if (old_key)
+	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
+	if (old_key) {
 		key = old_key;
-	else {
-		key = kzalloc(sizeof(*key), GFP_ATOMIC);
+	} else {
+		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
 		if (!key)
 			return -ENOMEM;
-		list_add(&key->list, &hdev->long_term_keys);
+		list_add(&key->list, &hdev->link_keys);
 	}
 
+	key->dlen = sizeof(*id);
+
 	bacpy(&key->bdaddr, bdaddr);
-	key->bdaddr_type = addr_type;
-	memcpy(key->val, tk, sizeof(key->val));
-	key->authenticated = authenticated;
-	key->ediv = ediv;
-	key->enc_size = enc_size;
-	key->type = type;
-	memcpy(key->rand, rand, sizeof(key->rand));
+	key->addr_type = addr_type;
+	memcpy(key->val, ltk, sizeof(key->val));
+	key->key_type = KEY_TYPE_LTK;
+	key->pin_len = key_size;
+	key->auth = auth;
 
-	if (!new_key)
-		return 0;
+	id = (void *) &key->data;
+	id->ediv = ediv;
+	memcpy(id->rand, rand, sizeof(id->rand));
 
-	if (type & HCI_SMP_LTK)
-		mgmt_new_ltk(hdev, key, 1);
+	if (new_key)
+		mgmt_new_key(hdev->id, key, auth & 0x01);
 
 	return 0;
 }
@@ -1388,23 +1240,6 @@
 	return 0;
 }
 
-int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
-	struct smp_ltk *k, *tmp;
-
-	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
-		if (bacmp(bdaddr, &k->bdaddr))
-			continue;
-
-		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
-
-		list_del(&k->list);
-		kfree(k);
-	}
-
-	return 0;
-}
-
 /* HCI command timer function */
 static void hci_cmd_timer(unsigned long arg)
 {
@@ -1412,11 +1247,12 @@
 
 	BT_ERR("%s command tx timeout", hdev->name);
 	atomic_set(&hdev->cmd_cnt, 1);
-	queue_work(hdev->workqueue, &hdev->cmd_work);
+	clear_bit(HCI_RESET, &hdev->flags);
+	tasklet_schedule(&hdev->cmd_task);
 }
 
 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
-					  bdaddr_t *bdaddr)
+							bdaddr_t *bdaddr)
 {
 	struct oob_data *data;
 
@@ -1455,8 +1291,67 @@
 	return 0;
 }
 
+static void hci_adv_clear(unsigned long arg)
+{
+	struct hci_dev *hdev = (void *) arg;
+
+	hci_adv_entries_clear(hdev);
+}
+
+int hci_adv_entries_clear(struct hci_dev *hdev)
+{
+	struct list_head *p, *n;
+
+	BT_DBG("");
+	write_lock_bh(&hdev->adv_entries_lock);
+
+	list_for_each_safe(p, n, &hdev->adv_entries) {
+		struct adv_entry *entry;
+
+		entry = list_entry(p, struct adv_entry, list);
+
+		list_del(p);
+		kfree(entry);
+	}
+
+	write_unlock_bh(&hdev->adv_entries_lock);
+
+	return 0;
+}
+
+struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct list_head *p;
+	struct adv_entry *res = NULL;
+
+	BT_DBG("");
+	read_lock_bh(&hdev->adv_entries_lock);
+
+	list_for_each(p, &hdev->adv_entries) {
+		struct adv_entry *entry;
+
+		entry = list_entry(p, struct adv_entry, list);
+
+		if (bacmp(bdaddr, &entry->bdaddr) == 0) {
+			res = entry;
+			goto out;
+		}
+	}
+out:
+	read_unlock_bh(&hdev->adv_entries_lock);
+	return res;
+}
+
+static inline int is_connectable_adv(u8 evt_type)
+{
+	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
+		return 1;
+
+	return 0;
+}
+
 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
-			    u8 *randomizer)
+								u8 *randomizer)
 {
 	struct oob_data *data;
 
@@ -1479,258 +1374,75 @@
 	return 0;
 }
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
-	struct bdaddr_list *b;
-
-	list_for_each_entry(b, &hdev->blacklist, list)
-		if (bacmp(bdaddr, &b->bdaddr) == 0)
-			return b;
-
-	return NULL;
-}
-
-int hci_blacklist_clear(struct hci_dev *hdev)
-{
-	struct list_head *p, *n;
-
-	list_for_each_safe(p, n, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
-
-		list_del(p);
-		kfree(b);
-	}
-
-	return 0;
-}
-
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
-	struct bdaddr_list *entry;
-
-	if (bacmp(bdaddr, BDADDR_ANY) == 0)
-		return -EBADF;
-
-	if (hci_blacklist_lookup(hdev, bdaddr))
-		return -EEXIST;
-
-	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
-	bacpy(&entry->bdaddr, bdaddr);
-
-	list_add(&entry->list, &hdev->blacklist);
-
-	return mgmt_device_blocked(hdev, bdaddr, type);
-}
-
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
-	struct bdaddr_list *entry;
-
-	if (bacmp(bdaddr, BDADDR_ANY) == 0)
-		return hci_blacklist_clear(hdev);
-
-	entry = hci_blacklist_lookup(hdev, bdaddr);
-	if (!entry)
-		return -ENOENT;
-
-	list_del(&entry->list);
-	kfree(entry);
-
-	return mgmt_device_unblocked(hdev, bdaddr, type);
-}
-
-static void hci_clear_adv_cache(struct work_struct *work)
-{
-	struct hci_dev *hdev = container_of(work, struct hci_dev,
-					    adv_work.work);
-
-	hci_dev_lock(hdev);
-
-	hci_adv_entries_clear(hdev);
-
-	hci_dev_unlock(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
-	struct adv_entry *entry, *tmp;
-
-	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
-		list_del(&entry->list);
-		kfree(entry);
-	}
-
-	BT_DBG("%s adv cache cleared", hdev->name);
-
-	return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_add_adv_entry(struct hci_dev *hdev,
+					struct hci_ev_le_advertising_info *ev)
 {
 	struct adv_entry *entry;
+	u8 flags = 0;
+	int i;
 
-	list_for_each_entry(entry, &hdev->adv_entries, list)
-		if (bacmp(bdaddr, &entry->bdaddr) == 0)
-			return entry;
+	BT_DBG("");
 
-	return NULL;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
-	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
-		return 1;
-
-	return 0;
-}
-
-int hci_add_adv_entry(struct hci_dev *hdev,
-					struct hci_ev_le_advertising_info *ev)
-{
-	struct adv_entry *entry;
-
-	if (!is_connectable_adv(ev->evt_type))
+	if (!is_connectable_adv(ev->evt_type))
 		return -EINVAL;
 
+	if (ev->data && ev->length) {
+		for (i = 0; (i + 2) < ev->length; i++)
+			if (ev->data[i+1] == 0x01) {
+				flags = ev->data[i+2];
+				BT_DBG("flags: %2.2x", flags);
+				break;
+			} else {
+				i += ev->data[i];
+			}
+	}
+
+	entry = hci_find_adv_entry(hdev, &ev->bdaddr);
 	/* Only new entries should be added to adv_entries. So, if
 	 * bdaddr was found, don't add it. */
-	if (hci_find_adv_entry(hdev, &ev->bdaddr))
+	if (entry) {
+		entry->flags = flags;
 		return 0;
+	}
 
-	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry)
 		return -ENOMEM;
 
 	bacpy(&entry->bdaddr, &ev->bdaddr);
 	entry->bdaddr_type = ev->bdaddr_type;
+	entry->flags = flags;
 
+	write_lock(&hdev->adv_entries_lock);
 	list_add(&entry->list, &hdev->adv_entries);
-
-	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
-				batostr(&entry->bdaddr), entry->bdaddr_type);
+	write_unlock(&hdev->adv_entries_lock);
 
 	return 0;
 }
 
-static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
+static struct crypto_blkcipher *alloc_cypher(void)
 {
-	struct le_scan_params *param =  (struct le_scan_params *) opt;
-	struct hci_cp_le_set_scan_param cp;
+	if (enable_smp)
+		return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
 
-	memset(&cp, 0, sizeof(cp));
-	cp.type = param->type;
-	cp.interval = cpu_to_le16(param->interval);
-	cp.window = cpu_to_le16(param->window);
-
-	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
-}
-
-static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
-{
-	struct hci_cp_le_set_scan_enable cp;
-
-	memset(&cp, 0, sizeof(cp));
-	cp.enable = 1;
-
-	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-}
-
-static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
-			  u16 window, int timeout)
-{
-	long timeo = msecs_to_jiffies(3000);
-	struct le_scan_params param;
-	int err;
-
-	BT_DBG("%s", hdev->name);
-
-	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
-		return -EINPROGRESS;
-
-	param.type = type;
-	param.interval = interval;
-	param.window = window;
-
-	hci_req_lock(hdev);
-
-	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
-			    timeo);
-	if (!err)
-		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
-
-	hci_req_unlock(hdev);
-
-	if (err < 0)
-		return err;
-
-	schedule_delayed_work(&hdev->le_scan_disable,
-			      msecs_to_jiffies(timeout));
-
-	return 0;
-}
-
-static void le_scan_disable_work(struct work_struct *work)
-{
-	struct hci_dev *hdev = container_of(work, struct hci_dev,
-					    le_scan_disable.work);
-	struct hci_cp_le_set_scan_enable cp;
-
-	BT_DBG("%s", hdev->name);
-
-	memset(&cp, 0, sizeof(cp));
-
-	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-}
-
-static void le_scan_work(struct work_struct *work)
-{
-	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
-	struct le_scan_params *param = &hdev->le_scan_params;
-
-	BT_DBG("%s", hdev->name);
-
-	hci_do_le_scan(hdev, param->type, param->interval, param->window,
-		       param->timeout);
-}
-
-int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
-		int timeout)
-{
-	struct le_scan_params *param = &hdev->le_scan_params;
-
-	BT_DBG("%s", hdev->name);
-
-	if (work_busy(&hdev->le_scan))
-		return -EINPROGRESS;
-
-	param->type = type;
-	param->interval = interval;
-	param->window = window;
-	param->timeout = timeout;
-
-	queue_work(system_long_wq, &hdev->le_scan);
-
-	return 0;
+	return ERR_PTR(-ENOTSUPP);
 }
 
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
 	struct list_head *head = &hci_dev_list, *p;
-	int i, id, error;
+	int i, id;
 
-	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
+						hdev->bus, hdev->owner);
 
-	if (!hdev->open || !hdev->close)
+	if (!hdev->open || !hdev->close || !hdev->destruct)
 		return -EINVAL;
 
-	/* Do not allow HCI_AMP devices to register at index 0,
-	 * so the index can be used as the AMP controller ID.
-	 */
 	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
 
-	write_lock(&hci_dev_list_lock);
+	write_lock_bh(&hci_dev_list_lock);
 
 	/* Find first available device id */
 	list_for_each(p, &hci_dev_list) {
@@ -1741,12 +1453,12 @@
 
 	sprintf(hdev->name, "hci%d", id);
 	hdev->id = id;
-	list_add_tail(&hdev->list, head);
+	list_add(&hdev->list, head);
 
-	mutex_init(&hdev->lock);
+	atomic_set(&hdev->refcnt, 1);
+	spin_lock_init(&hdev->lock);
 
 	hdev->flags = 0;
-	hdev->dev_flags = 0;
 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
 	hdev->esco_type = (ESCO_HV1);
 	hdev->link_mode = (HCI_LM_ACCEPT);
@@ -1756,16 +1468,19 @@
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
 
-	INIT_WORK(&hdev->rx_work, hci_rx_work);
-	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
-	INIT_WORK(&hdev->tx_work, hci_tx_work);
-
+	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
+	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
+	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
 
 	skb_queue_head_init(&hdev->rx_q);
 	skb_queue_head_init(&hdev->cmd_q);
 	skb_queue_head_init(&hdev->raw_q);
 
 	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+	setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
+						(unsigned long) hdev);
+	setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
+						(unsigned long) hdev);
 
 	for (i = 0; i < NUM_REASSEMBLY; i++)
 		hdev->reassembly[i] = NULL;
@@ -1773,49 +1488,43 @@
 	init_waitqueue_head(&hdev->req_wait_q);
 	mutex_init(&hdev->req_lock);
 
-	discovery_init(hdev);
+	inquiry_cache_init(hdev);
 
 	hci_conn_hash_init(hdev);
-
-	INIT_LIST_HEAD(&hdev->mgmt_pending);
+	hci_chan_list_init(hdev);
 
 	INIT_LIST_HEAD(&hdev->blacklist);
 
 	INIT_LIST_HEAD(&hdev->uuids);
 
 	INIT_LIST_HEAD(&hdev->link_keys);
-	INIT_LIST_HEAD(&hdev->long_term_keys);
 
 	INIT_LIST_HEAD(&hdev->remote_oob_data);
 
 	INIT_LIST_HEAD(&hdev->adv_entries);
+	rwlock_init(&hdev->adv_entries_lock);
+	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
 
-	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
 	INIT_WORK(&hdev->power_on, hci_power_on);
-	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
-
-	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+	INIT_WORK(&hdev->power_off, hci_power_off);
+	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
 
 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
 	atomic_set(&hdev->promisc, 0);
 
-	INIT_WORK(&hdev->le_scan, le_scan_work);
+	write_unlock_bh(&hci_dev_list_lock);
 
-	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+	hdev->workqueue = create_singlethread_workqueue(hdev->name);
+	if (!hdev->workqueue)
+		goto nomem;
 
-	write_unlock(&hci_dev_list_lock);
+	hdev->tfm = alloc_cypher();
+	if (IS_ERR(hdev->tfm))
+		BT_INFO("Failed to load transform for ecb(aes): %ld",
+							PTR_ERR(hdev->tfm));
 
-	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
-							WQ_MEM_RECLAIM, 1);
-	if (!hdev->workqueue) {
-		error = -ENOMEM;
-		goto err;
-	}
-
-	error = hci_add_sysfs(hdev);
-	if (error < 0)
-		goto err_wqueue;
+	hci_register_sysfs(hdev);
 
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1826,54 +1535,49 @@
 		}
 	}
 
-	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
-	set_bit(HCI_SETUP, &hdev->dev_flags);
-	schedule_work(&hdev->power_on);
+	set_bit(HCI_AUTO_OFF, &hdev->flags);
+	set_bit(HCI_SETUP, &hdev->flags);
+	queue_work(hdev->workqueue, &hdev->power_on);
 
 	hci_notify(hdev, HCI_DEV_REG);
-	hci_dev_hold(hdev);
 
 	return id;
 
-err_wqueue:
-	destroy_workqueue(hdev->workqueue);
-err:
-	write_lock(&hci_dev_list_lock);
+nomem:
+	write_lock_bh(&hci_dev_list_lock);
 	list_del(&hdev->list);
-	write_unlock(&hci_dev_list_lock);
+	write_unlock_bh(&hci_dev_list_lock);
 
-	return error;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(hci_register_dev);
 
 /* Unregister HCI device */
-void hci_unregister_dev(struct hci_dev *hdev)
+int hci_unregister_dev(struct hci_dev *hdev)
 {
 	int i;
 
 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
-	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
-
-	write_lock(&hci_dev_list_lock);
+	write_lock_bh(&hci_dev_list_lock);
 	list_del(&hdev->list);
-	write_unlock(&hci_dev_list_lock);
+	write_unlock_bh(&hci_dev_list_lock);
 
-	hci_dev_do_close(hdev);
+	hci_dev_do_close(hdev, hdev->bus == HCI_SMD);
 
 	for (i = 0; i < NUM_REASSEMBLY; i++)
 		kfree_skb(hdev->reassembly[i]);
 
 	if (!test_bit(HCI_INIT, &hdev->flags) &&
-				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
-		hci_dev_lock(hdev);
-		mgmt_index_removed(hdev);
-		hci_dev_unlock(hdev);
+				!test_bit(HCI_SETUP, &hdev->flags) &&
+				hdev->dev_type == HCI_BREDR) {
+		hci_dev_lock_bh(hdev);
+		mgmt_index_removed(hdev->id);
+		hci_dev_unlock_bh(hdev);
 	}
 
-	/* mgmt_index_removed should take care of emptying the
-	 * pending list */
-	BUG_ON(!list_empty(&hdev->mgmt_pending));
+	if (!IS_ERR(hdev->tfm))
+		crypto_free_blkcipher(hdev->tfm);
 
 	hci_notify(hdev, HCI_DEV_UNREG);
 
@@ -1882,22 +1586,28 @@
 		rfkill_destroy(hdev->rfkill);
 	}
 
-	hci_del_sysfs(hdev);
+	hci_unregister_sysfs(hdev);
 
-	cancel_delayed_work_sync(&hdev->adv_work);
+	/* Disable all timers */
+	hci_del_off_timer(hdev);
+	del_timer(&hdev->adv_timer);
+	del_timer(&hdev->cmd_timer);
+	del_timer(&hdev->disco_timer);
+	del_timer(&hdev->disco_le_timer);
 
 	destroy_workqueue(hdev->workqueue);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 	hci_blacklist_clear(hdev);
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
-	hci_smp_ltks_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
 	hci_adv_entries_clear(hdev);
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 
-	hci_dev_put(hdev);
+	__hci_dev_put(hdev);
+
+	return 0;
 }
 EXPORT_SYMBOL(hci_unregister_dev);
 
@@ -1933,8 +1643,9 @@
 	/* Time stamp */
 	__net_timestamp(skb);
 
+	/* Queue frame for rx task */
 	skb_queue_tail(&hdev->rx_q, skb);
-	queue_work(hdev->workqueue, &hdev->rx_work);
+	tasklet_schedule(&hdev->rx_task);
 
 	return 0;
 }
@@ -1985,7 +1696,7 @@
 
 	while (count) {
 		scb = (void *) skb->cb;
-		len = min_t(uint, scb->expect, count);
+		len = min(scb->expect, (__u16)count);
 
 		memcpy(skb_put(skb, len), data, len);
 
@@ -2063,7 +1774,7 @@
 
 		data += (count - rem);
 		count = rem;
-	}
+	};
 
 	return rem;
 }
@@ -2098,7 +1809,7 @@
 
 		data += (count - rem);
 		count = rem;
-	}
+	};
 
 	return rem;
 }
@@ -2106,13 +1817,59 @@
 
 /* ---- Interface to upper protocols ---- */
 
+/* Register/Unregister protocols.
+ * hci_task_lock is used to ensure that no tasks are running. */
+int hci_register_proto(struct hci_proto *hp)
+{
+	int err = 0;
+
+	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
+
+	if (hp->id >= HCI_MAX_PROTO)
+		return -EINVAL;
+
+	write_lock_bh(&hci_task_lock);
+
+	if (!hci_proto[hp->id])
+		hci_proto[hp->id] = hp;
+	else
+		err = -EEXIST;
+
+	write_unlock_bh(&hci_task_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(hci_register_proto);
+
+int hci_unregister_proto(struct hci_proto *hp)
+{
+	int err = 0;
+
+	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
+
+	if (hp->id >= HCI_MAX_PROTO)
+		return -EINVAL;
+
+	write_lock_bh(&hci_task_lock);
+
+	if (hci_proto[hp->id])
+		hci_proto[hp->id] = NULL;
+	else
+		err = -ENOENT;
+
+	write_unlock_bh(&hci_task_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(hci_unregister_proto);
+
 int hci_register_cb(struct hci_cb *cb)
 {
 	BT_DBG("%p name %s", cb, cb->name);
 
-	write_lock(&hci_cb_list_lock);
+	write_lock_bh(&hci_cb_list_lock);
 	list_add(&cb->list, &hci_cb_list);
-	write_unlock(&hci_cb_list_lock);
+	write_unlock_bh(&hci_cb_list_lock);
 
 	return 0;
 }
@@ -2122,14 +1879,82 @@
 {
 	BT_DBG("%p name %s", cb, cb->name);
 
-	write_lock(&hci_cb_list_lock);
+	write_lock_bh(&hci_cb_list_lock);
 	list_del(&cb->list);
-	write_unlock(&hci_cb_list_lock);
+	write_unlock_bh(&hci_cb_list_lock);
 
 	return 0;
 }
 EXPORT_SYMBOL(hci_unregister_cb);
 
+int hci_register_amp(struct amp_mgr_cb *cb)
+{
+	BT_DBG("%p", cb);
+
+	write_lock_bh(&amp_mgr_cb_list_lock);
+	list_add(&cb->list, &amp_mgr_cb_list);
+	write_unlock_bh(&amp_mgr_cb_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(hci_register_amp);
+
+int hci_unregister_amp(struct amp_mgr_cb *cb)
+{
+	BT_DBG("%p", cb);
+
+	write_lock_bh(&amp_mgr_cb_list_lock);
+	list_del(&cb->list);
+	write_unlock_bh(&amp_mgr_cb_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(hci_unregister_amp);
+
+void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
+			struct sk_buff *skb)
+{
+	struct amp_mgr_cb *cb;
+
+	BT_DBG("opcode 0x%x", opcode);
+
+	read_lock_bh(&amp_mgr_cb_list_lock);
+	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
+		if (cb->amp_cmd_complete_event)
+			cb->amp_cmd_complete_event(hdev, opcode, skb);
+	}
+	read_unlock_bh(&amp_mgr_cb_list_lock);
+}
+
+void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
+{
+	struct amp_mgr_cb *cb;
+
+	BT_DBG("opcode 0x%x, status %d", opcode, status);
+
+	read_lock_bh(&amp_mgr_cb_list_lock);
+	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
+		if (cb->amp_cmd_status_event)
+			cb->amp_cmd_status_event(hdev, opcode, status);
+	}
+	read_unlock_bh(&amp_mgr_cb_list_lock);
+}
+
+void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
+			struct sk_buff *skb)
+{
+	struct amp_mgr_cb *cb;
+
+	BT_DBG("ev_code 0x%x", ev_code);
+
+	read_lock_bh(&amp_mgr_cb_list_lock);
+	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
+		if (cb->amp_event)
+			cb->amp_event(hdev, ev_code, skb);
+	}
+	read_unlock_bh(&amp_mgr_cb_list_lock);
+}
+
 static int hci_send_frame(struct sk_buff *skb)
 {
 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
@@ -2141,20 +1966,17 @@
 
 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
-	/* Time stamp */
-	__net_timestamp(skb);
-
-	/* Send copy to monitor */
-	hci_send_to_monitor(hdev, skb);
-
 	if (atomic_read(&hdev->promisc)) {
-		/* Send copy to the sockets */
-		hci_send_to_sock(hdev, skb);
+		/* Time stamp */
+		__net_timestamp(skb);
+
+		hci_send_to_sock(hdev, skb, NULL);
 	}
 
 	/* Get rid of skb owner, prior to sending to the driver. */
 	skb_orphan(skb);
 
+	hci_notify(hdev, HCI_DEV_WRITE);
 	return hdev->send(skb);
 }
 
@@ -2189,10 +2011,11 @@
 		hdev->init_last_cmd = opcode;
 
 	skb_queue_tail(&hdev->cmd_q, skb);
-	queue_work(hdev->workqueue, &hdev->cmd_work);
+	tasklet_schedule(&hdev->cmd_task);
 
 	return 0;
 }
+EXPORT_SYMBOL(hci_send_cmd);
 
 /* Get data from the previously sent command */
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
@@ -2225,18 +2048,27 @@
 	hdr->dlen   = cpu_to_le16(len);
 }
 
-static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
-				struct sk_buff *skb, __u16 flags)
+void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
+		struct sk_buff *skb, __u16 flags)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct sk_buff *list;
 
+	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
+
+	skb->dev = (void *) hdev;
+	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+	if (hdev->dev_type == HCI_BREDR)
+		hci_add_acl_hdr(skb, conn->handle, flags);
+	else
+		hci_add_acl_hdr(skb, chan->ll_handle, flags);
+
 	list = skb_shinfo(skb)->frag_list;
 	if (!list) {
 		/* Non fragmented */
 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
 
-		skb_queue_tail(queue, skb);
+		skb_queue_tail(&conn->data_q, skb);
 	} else {
 		/* Fragmented */
 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -2244,11 +2076,10 @@
 		skb_shinfo(skb)->frag_list = NULL;
 
 		/* Queue all fragments atomically */
-		spin_lock(&queue->lock);
+		spin_lock_bh(&conn->data_q.lock);
 
-		__skb_queue_tail(queue, skb);
-
-		flags &= ~ACL_START;
+		__skb_queue_tail(&conn->data_q, skb);
+		flags &= ~ACL_PB_MASK;
 		flags |= ACL_CONT;
 		do {
 			skb = list; list = list->next;
@@ -2259,27 +2090,13 @@
 
 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
-			__skb_queue_tail(queue, skb);
+			__skb_queue_tail(&conn->data_q, skb);
 		} while (list);
 
-		spin_unlock(&queue->lock);
+		spin_unlock_bh(&conn->data_q.lock);
 	}
-}
 
-void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
-{
-	struct hci_conn *conn = chan->conn;
-	struct hci_dev *hdev = conn->hdev;
-
-	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
-
-	skb->dev = (void *) hdev;
-	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-	hci_add_acl_hdr(skb, conn->handle, flags);
-
-	hci_queue_acl(conn, &chan->data_q, skb, flags);
-
-	queue_work(hdev->workqueue, &hdev->tx_work);
+	tasklet_schedule(&hdev->tx_task);
 }
 EXPORT_SYMBOL(hci_send_acl);
 
@@ -2302,7 +2119,7 @@
 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
 
 	skb_queue_tail(&conn->data_q, skb);
-	queue_work(hdev->workqueue, &hdev->tx_work);
+	tasklet_schedule(&hdev->tx_task);
 }
 EXPORT_SYMBOL(hci_send_sco);
 
@@ -2312,15 +2129,16 @@
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_conn *conn = NULL, *c;
+	struct hci_conn *conn = NULL;
 	int num = 0, min = ~0;
+	struct list_head *p;
 
 	/* We don't have to lock device here. Connections are always
 	 * added and removed with TX task disabled. */
+	list_for_each(p, &h->list) {
+		struct hci_conn *c;
+		c = list_entry(p, struct hci_conn, list);
 
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(c, &h->list, list) {
 		if (c->type != type || skb_queue_empty(&c->data_q))
 			continue;
 
@@ -2333,13 +2151,8 @@
 			min  = c->sent;
 			conn = c;
 		}
-
-		if (hci_conn_num(hdev, type) == num)
-			break;
 	}
 
-	rcu_read_unlock();
-
 	if (conn) {
 		int cnt, q;
 
@@ -2371,269 +2184,66 @@
 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
 	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_conn *c;
+	struct list_head *p;
+	struct hci_conn  *c;
 
 	BT_ERR("%s link tx timeout", hdev->name);
 
-	rcu_read_lock();
-
 	/* Kill stalled connections */
-	list_for_each_entry_rcu(c, &h->list, list) {
+	list_for_each(p, &h->list) {
+		c = list_entry(p, struct hci_conn, list);
 		if (c->type == type && c->sent) {
 			BT_ERR("%s killing stalled connection %s",
 				hdev->name, batostr(&c->dst));
 			hci_acl_disconn(c, 0x13);
 		}
 	}
-
-	rcu_read_unlock();
-}
-
-static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
-						int *quote)
-{
-	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_chan *chan = NULL;
-	int num = 0, min = ~0, cur_prio = 0;
-	struct hci_conn *conn;
-	int cnt, q, conn_num = 0;
-
-	BT_DBG("%s", hdev->name);
-
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(conn, &h->list, list) {
-		struct hci_chan *tmp;
-
-		if (conn->type != type)
-			continue;
-
-		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
-			continue;
-
-		conn_num++;
-
-		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
-			struct sk_buff *skb;
-
-			if (skb_queue_empty(&tmp->data_q))
-				continue;
-
-			skb = skb_peek(&tmp->data_q);
-			if (skb->priority < cur_prio)
-				continue;
-
-			if (skb->priority > cur_prio) {
-				num = 0;
-				min = ~0;
-				cur_prio = skb->priority;
-			}
-
-			num++;
-
-			if (conn->sent < min) {
-				min  = conn->sent;
-				chan = tmp;
-			}
-		}
-
-		if (hci_conn_num(hdev, type) == conn_num)
-			break;
-	}
-
-	rcu_read_unlock();
-
-	if (!chan)
-		return NULL;
-
-	switch (chan->conn->type) {
-	case ACL_LINK:
-		cnt = hdev->acl_cnt;
-		break;
-	case SCO_LINK:
-	case ESCO_LINK:
-		cnt = hdev->sco_cnt;
-		break;
-	case LE_LINK:
-		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
-		break;
-	default:
-		cnt = 0;
-		BT_ERR("Unknown link type");
-	}
-
-	q = cnt / num;
-	*quote = q ? q : 1;
-	BT_DBG("chan %p quote %d", chan, *quote);
-	return chan;
-}
-
-static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
-{
-	struct hci_conn_hash *h = &hdev->conn_hash;
-	struct hci_conn *conn;
-	int num = 0;
-
-	BT_DBG("%s", hdev->name);
-
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(conn, &h->list, list) {
-		struct hci_chan *chan;
-
-		if (conn->type != type)
-			continue;
-
-		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
-			continue;
-
-		num++;
-
-		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
-			struct sk_buff *skb;
-
-			if (chan->sent) {
-				chan->sent = 0;
-				continue;
-			}
-
-			if (skb_queue_empty(&chan->data_q))
-				continue;
-
-			skb = skb_peek(&chan->data_q);
-			if (skb->priority >= HCI_PRIO_MAX - 1)
-				continue;
-
-			skb->priority = HCI_PRIO_MAX - 1;
-
-			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
-								skb->priority);
-		}
-
-		if (hci_conn_num(hdev, type) == num)
-			break;
-	}
-
-	rcu_read_unlock();
-
-}
-
-static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
-{
-	/* Calculate count of blocks used by this packet */
-	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
-}
-
-static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
-{
-	if (!test_bit(HCI_RAW, &hdev->flags)) {
-		/* ACL tx timeout must be longer than maximum
-		 * link supervision timeout (40.9 seconds) */
-		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
-					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
-			hci_link_tx_to(hdev, ACL_LINK);
-	}
-}
-
-static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
-{
-	unsigned int cnt = hdev->acl_cnt;
-	struct hci_chan *chan;
-	struct sk_buff *skb;
-	int quote;
-
-	__check_timeout(hdev, cnt);
-
-	while (hdev->acl_cnt &&
-			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
-		u32 priority = (skb_peek(&chan->data_q))->priority;
-		while (quote-- && (skb = skb_peek(&chan->data_q))) {
-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
-					skb->len, skb->priority);
-
-			/* Stop if priority has changed */
-			if (skb->priority < priority)
-				break;
-
-			skb = skb_dequeue(&chan->data_q);
-
-			hci_conn_enter_active_mode(chan->conn,
-						   bt_cb(skb)->force_active);
-
-			hci_send_frame(skb);
-			hdev->acl_last_tx = jiffies;
-
-			hdev->acl_cnt--;
-			chan->sent++;
-			chan->conn->sent++;
-		}
-	}
-
-	if (cnt != hdev->acl_cnt)
-		hci_prio_recalculate(hdev, ACL_LINK);
-}
-
-static inline void hci_sched_acl_blk(struct hci_dev *hdev)
-{
-	unsigned int cnt = hdev->block_cnt;
-	struct hci_chan *chan;
-	struct sk_buff *skb;
-	int quote;
-
-	__check_timeout(hdev, cnt);
-
-	while (hdev->block_cnt > 0 &&
-			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
-		u32 priority = (skb_peek(&chan->data_q))->priority;
-		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
-			int blocks;
-
-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
-						skb->len, skb->priority);
-
-			/* Stop if priority has changed */
-			if (skb->priority < priority)
-				break;
-
-			skb = skb_dequeue(&chan->data_q);
-
-			blocks = __get_blocks(hdev, skb);
-			if (blocks > hdev->block_cnt)
-				return;
-
-			hci_conn_enter_active_mode(chan->conn,
-						bt_cb(skb)->force_active);
-
-			hci_send_frame(skb);
-			hdev->acl_last_tx = jiffies;
-
-			hdev->block_cnt -= blocks;
-			quote -= blocks;
-
-			chan->sent += blocks;
-			chan->conn->sent += blocks;
-		}
-	}
-
-	if (cnt != hdev->block_cnt)
-		hci_prio_recalculate(hdev, ACL_LINK);
 }
 
 static inline void hci_sched_acl(struct hci_dev *hdev)
 {
+	struct hci_conn *conn;
+	struct sk_buff *skb;
+	int quote;
+
 	BT_DBG("%s", hdev->name);
 
-	if (!hci_conn_num(hdev, ACL_LINK))
-		return;
+	if (!test_bit(HCI_RAW, &hdev->flags)) {
+		/* ACL tx timeout must be longer than maximum
+		 * link supervision timeout (40.9 seconds) */
+		if (hdev->acl_cnt <= 0 &&
+			time_after(jiffies, hdev->acl_last_tx + HZ * 45))
+			hci_link_tx_to(hdev, ACL_LINK);
+	}
 
-	switch (hdev->flow_ctl_mode) {
-	case HCI_FLOW_CTL_MODE_PACKET_BASED:
-		hci_sched_acl_pkt(hdev);
-		break;
+	while (hdev->acl_cnt > 0 &&
+		(conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
+		while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
+			int count = 1;
 
-	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
-		hci_sched_acl_blk(hdev);
-		break;
+			BT_DBG("skb %p len %d", skb, skb->len);
+
+			if (hdev->flow_ctl_mode ==
+				HCI_BLOCK_BASED_FLOW_CTL_MODE)
+				/* Calculate count of blocks used by
+				 * this packet
+				 */
+				count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
+					hdev->data_block_len) + 1;
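+				/* Ceiling division: one block per data_block_len bytes of ACL payload */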
+
+			if (count > hdev->acl_cnt)
+				return;
+
+			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+
+			hci_send_frame(skb);
+			hdev->acl_last_tx = jiffies;
+
+			hdev->acl_cnt -= count;
+			quote -= count;
+
+			conn->sent += count;
+		}
 	}
 }
 
@@ -2646,9 +2256,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!hci_conn_num(hdev, SCO_LINK))
-		return;
-
 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
@@ -2669,9 +2276,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!hci_conn_num(hdev, ESCO_LINK))
-		return;
-
 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
@@ -2686,15 +2290,12 @@
 
 static inline void hci_sched_le(struct hci_dev *hdev)
 {
-	struct hci_chan *chan;
+	struct hci_conn *conn;
 	struct sk_buff *skb;
-	int quote, cnt, tmp;
+	int quote, cnt;
 
 	BT_DBG("%s", hdev->name);
 
-	if (!hci_conn_num(hdev, LE_LINK))
-		return;
-
 	if (!test_bit(HCI_RAW, &hdev->flags)) {
 		/* LE tx timeout must be longer than maximum
 		 * link supervision timeout (40.9 seconds) */
@@ -2704,42 +2305,30 @@
 	}
 
 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-	tmp = cnt;
-	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
-		u32 priority = (skb_peek(&chan->data_q))->priority;
-		while (quote-- && (skb = skb_peek(&chan->data_q))) {
-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
-					skb->len, skb->priority);
-
-			/* Stop if priority has changed */
-			if (skb->priority < priority)
-				break;
-
-			skb = skb_dequeue(&chan->data_q);
+	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
+		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+			BT_DBG("skb %p len %d", skb, skb->len);
 
 			hci_send_frame(skb);
 			hdev->le_last_tx = jiffies;
 
 			cnt--;
-			chan->sent++;
-			chan->conn->sent++;
+			conn->sent++;
 		}
 	}
-
 	if (hdev->le_pkts)
 		hdev->le_cnt = cnt;
 	else
 		hdev->acl_cnt = cnt;
-
-	if (cnt != tmp)
-		hci_prio_recalculate(hdev, LE_LINK);
 }
 
-static void hci_tx_work(struct work_struct *work)
+static void hci_tx_task(unsigned long arg)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
+	struct hci_dev *hdev = (struct hci_dev *) arg;
 	struct sk_buff *skb;
 
+	read_lock(&hci_task_lock);
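+	/* hci_task_lock serialises this tasklet against HCI protocol (un)registration */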
+
 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
 		hdev->sco_cnt, hdev->le_cnt);
 
@@ -2756,9 +2345,11 @@
 	/* Send next queued raw (unknown type) packet */
 	while ((skb = skb_dequeue(&hdev->raw_q)))
 		hci_send_frame(skb);
+
+	read_unlock(&hci_task_lock);
 }
 
-/* ----- HCI RX task (incoming data processing) ----- */
+/* ----- HCI RX task (incoming data processing) ----- */
 
 /* ACL data packet */
 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2782,19 +2373,16 @@
 	hci_dev_unlock(hdev);
 
 	if (conn) {
-		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
+		register struct hci_proto *hp;
 
-		hci_dev_lock(hdev);
-		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
-			mgmt_device_connected(hdev, &conn->dst, conn->type,
-					      conn->dst_type, 0, NULL, 0,
-					      conn->dev_class);
-		hci_dev_unlock(hdev);
+		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
 
 		/* Send to upper protocol */
-		l2cap_recv_acldata(conn, skb, flags);
-		return;
+		hp = hci_proto[HCI_PROTO_L2CAP];
+		if (hp && hp->recv_acldata) {
+			hp->recv_acldata(conn, skb, flags);
+			return;
+		}
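+		/* No L2CAP receiver registered: fall through and drop the frame below */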
 	} else {
 		BT_ERR("%s ACL packet for unknown connection handle %d",
 			hdev->name, handle);
@@ -2823,9 +2411,14 @@
 	hci_dev_unlock(hdev);
 
 	if (conn) {
+		register struct hci_proto *hp;
+
 		/* Send to upper protocol */
-		sco_recv_scodata(conn, skb);
-		return;
+		hp = hci_proto[HCI_PROTO_SCO];
+		if (hp && hp->recv_scodata) {
+			hp->recv_scodata(conn, skb);
+			return;
+		}
 	} else {
 		BT_ERR("%s SCO packet for unknown connection handle %d",
 			hdev->name, handle);
@@ -2834,20 +2427,19 @@
 	kfree_skb(skb);
 }
 
-static void hci_rx_work(struct work_struct *work)
+static void hci_rx_task(unsigned long arg)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
+	struct hci_dev *hdev = (struct hci_dev *) arg;
 	struct sk_buff *skb;
 
 	BT_DBG("%s", hdev->name);
 
-	while ((skb = skb_dequeue(&hdev->rx_q))) {
-		/* Send copy to monitor */
-		hci_send_to_monitor(hdev, skb);
+	read_lock(&hci_task_lock);
 
+	while ((skb = skb_dequeue(&hdev->rx_q))) {
 		if (atomic_read(&hdev->promisc)) {
 			/* Send copy to the sockets */
-			hci_send_to_sock(hdev, skb);
+			hci_send_to_sock(hdev, skb, NULL);
 		}
 
 		if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -2868,7 +2460,6 @@
 		/* Process frame */
 		switch (bt_cb(skb)->pkt_type) {
 		case HCI_EVENT_PKT:
-			BT_DBG("%s Event packet", hdev->name);
 			hci_event_packet(hdev, skb);
 			break;
 
@@ -2887,11 +2478,13 @@
 			break;
 		}
 	}
+
+	read_unlock(&hci_task_lock);
 }
 
-static void hci_cmd_work(struct work_struct *work)
+static void hci_cmd_task(unsigned long arg)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
+	struct hci_dev *hdev = (struct hci_dev *) arg;
 	struct sk_buff *skb;
 
 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
@@ -2908,44 +2501,14 @@
 		if (hdev->sent_cmd) {
 			atomic_dec(&hdev->cmd_cnt);
 			hci_send_frame(skb);
-			if (test_bit(HCI_RESET, &hdev->flags))
-				del_timer(&hdev->cmd_timer);
-			else
-				mod_timer(&hdev->cmd_timer,
+			mod_timer(&hdev->cmd_timer,
 				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
 		} else {
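+			/* Could not stash the command as sent_cmd: requeue it and reschedule */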
 			skb_queue_head(&hdev->cmd_q, skb);
-			queue_work(hdev->workqueue, &hdev->cmd_work);
+			tasklet_schedule(&hdev->cmd_task);
 		}
 	}
 }
 
-int hci_do_inquiry(struct hci_dev *hdev, u8 length)
-{
-	/* General inquiry access code (GIAC) */
-	u8 lap[3] = { 0x33, 0x8b, 0x9e };
-	struct hci_cp_inquiry cp;
-
-	BT_DBG("%s", hdev->name);
-
-	if (test_bit(HCI_INQUIRY, &hdev->flags))
-		return -EINPROGRESS;
-
-	inquiry_cache_flush(hdev);
-
-	memset(&cp, 0, sizeof(cp));
-	memcpy(&cp.lap, lap, sizeof(cp.lap));
-	cp.length  = length;
-
-	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
-}
-
-int hci_cancel_inquiry(struct hci_dev *hdev)
-{
-	BT_DBG("%s", hdev->name);
-
-	if (!test_bit(HCI_INQUIRY, &hdev->flags))
-		return -EPERM;
-
-	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-}
+module_param(enable_smp, bool, 0644);
+MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");