blob: c2251e4c3b722cfc07792f3fc482395add301042 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Marcel Holtmannb78752c2010-08-08 23:06:53 -040057static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020058static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020059static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Linus Torvalds1da177e2005-04-16 15:20:36 -070061/* HCI device list */
62LIST_HEAD(hci_dev_list);
63DEFINE_RWLOCK(hci_dev_list_lock);
64
65/* HCI callback list */
66LIST_HEAD(hci_cb_list);
67DEFINE_RWLOCK(hci_cb_list_lock);
68
Linus Torvalds1da177e2005-04-16 15:20:36 -070069/* ---- HCI notifications ---- */
70
Marcel Holtmann65164552005-10-28 19:20:48 +020071static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070072{
Marcel Holtmann040030e2012-02-20 14:50:37 +010073 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074}
75
76/* ---- HCI requests ---- */
77
Johan Hedberg23bb5762010-12-21 23:01:27 +020078void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Johan Hedberg23bb5762010-12-21 23:01:27 +020080 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
Johan Hedberga5040ef2011-01-10 13:28:59 +020082 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020085 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +0200106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100128 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129{
130 DECLARE_WAITQUEUE(wait, current);
131 int err = 0;
132
133 BT_DBG("%s start", hdev->name);
134
135 hdev->req_status = HCI_REQ_PEND;
136
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
139
140 req(hdev, opt);
141 schedule_timeout(timeout);
142
143 remove_wait_queue(&hdev->req_wait_q, &wait);
144
145 if (signal_pending(current))
146 return -EINTR;
147
148 switch (hdev->req_status) {
149 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700150 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 break;
152
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
155 break;
156
157 default:
158 err = -ETIMEDOUT;
159 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
Johan Hedberga5040ef2011-01-10 13:28:59 +0200162 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
164 BT_DBG("%s end: err %d", hdev->name, err);
165
166 return err;
167}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300190 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192}
193
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200194static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200196 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800197 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200198 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 /* Mandatory initialization */
203
204 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200213 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
228 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200234 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700238 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244}
245
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200246static void amp_init(struct hci_dev *hdev)
247{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255}
256
257static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258{
259 struct sk_buff *skb;
260
261 BT_DBG("%s %ld", hdev->name, opt);
262
263 /* Driver initialization */
264
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
269
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
272 }
273 skb_queue_purge(&hdev->driver_init);
274
275 switch (hdev->dev_type) {
276 case HCI_BREDR:
277 bredr_init(hdev);
278 break;
279
280 case HCI_AMP:
281 amp_init(hdev);
282 break;
283
284 default:
285 BT_ERR("Unknown device type %d", hdev->dev_type);
286 break;
287 }
288
289}
290
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300291static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292{
293 BT_DBG("%s", hdev->name);
294
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297}
298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
Marcel Holtmanna418b892008-11-30 12:17:28 +0100333 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900339/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200343 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200351 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
361/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200362
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
Andre Guedes6fbe1952012-02-03 17:47:58 -0300367 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300368 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200370 return true;
371
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 default:
373 return false;
374 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375}
376
Johan Hedbergff9ef572012-01-04 14:23:45 +0200377void hci_discovery_set_state(struct hci_dev *hdev, int state)
378{
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381 if (hdev->discovery.state == state)
382 return;
383
384 switch (state) {
385 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
Johan Hedbergf963e8e2012-02-20 23:30:44 +0200388 hdev->discovery.type = 0;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200389 break;
390 case DISCOVERY_STARTING:
391 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300392 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200393 mgmt_discovering(hdev, 1);
394 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200395 case DISCOVERY_RESOLVING:
396 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200397 case DISCOVERY_STOPPING:
398 break;
399 }
400
401 hdev->discovery.state = state;
402}
403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
Johan Hedberg30883512012-01-04 14:16:21 +0200406 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200407 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409 list_for_each_entry_safe(p, n, &cache->all, all) {
410 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419{
Johan Hedberg30883512012-01-04 14:16:21 +0200420 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200427 return e;
428 }
429
430 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431}
432
Johan Hedberg561aafb2012-01-04 13:31:59 +0200433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300434 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200435{
Johan Hedberg30883512012-01-04 14:16:21 +0200436 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200449struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300450 bdaddr_t *bdaddr,
451 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct inquiry_entry *e;
455
456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458 list_for_each_entry(e, &cache->resolve, list) {
459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 return e;
461 if (!bacmp(&e->data.bdaddr, bdaddr))
462 return e;
463 }
464
465 return NULL;
466}
467
Johan Hedberga3d4e202012-01-09 00:53:02 +0200468void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300469 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200470{
471 struct discovery_state *cache = &hdev->discovery;
472 struct list_head *pos = &cache->resolve;
473 struct inquiry_entry *p;
474
475 list_del(&ie->list);
476
477 list_for_each_entry(p, &cache->resolve, list) {
478 if (p->name_state != NAME_PENDING &&
479 abs(p->data.rssi) >= abs(ie->data.rssi))
480 break;
481 pos = &p->list;
482 }
483
484 list_add(&ie->list, pos);
485}
486
Johan Hedberg31754052012-01-04 13:39:52 +0200487bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300488 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700489{
Johan Hedberg30883512012-01-04 14:16:21 +0200490 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200491 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492
493 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
494
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200495 if (ssp)
496 *ssp = data->ssp_mode;
497
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200498 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200499 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200500 if (ie->data.ssp_mode && ssp)
501 *ssp = true;
502
Johan Hedberga3d4e202012-01-09 00:53:02 +0200503 if (ie->name_state == NAME_NEEDED &&
504 data->rssi != ie->data.rssi) {
505 ie->data.rssi = data->rssi;
506 hci_inquiry_cache_update_resolve(hdev, ie);
507 }
508
Johan Hedberg561aafb2012-01-04 13:31:59 +0200509 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200510 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200511
Johan Hedberg561aafb2012-01-04 13:31:59 +0200512 /* Entry not in the cache. Add new one. */
513 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
514 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200515 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200516
517 list_add(&ie->all, &cache->all);
518
519 if (name_known) {
520 ie->name_state = NAME_KNOWN;
521 } else {
522 ie->name_state = NAME_NOT_KNOWN;
523 list_add(&ie->list, &cache->unknown);
524 }
525
526update:
527 if (name_known && ie->name_state != NAME_KNOWN &&
528 ie->name_state != NAME_PENDING) {
529 ie->name_state = NAME_KNOWN;
530 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 }
532
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200533 memcpy(&ie->data, data, sizeof(*data));
534 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200536
537 if (ie->name_state == NAME_NOT_KNOWN)
538 return false;
539
540 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541}
542
543static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544{
Johan Hedberg30883512012-01-04 14:16:21 +0200545 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 struct inquiry_info *info = (struct inquiry_info *) buf;
547 struct inquiry_entry *e;
548 int copied = 0;
549
Johan Hedberg561aafb2012-01-04 13:31:59 +0200550 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200552
553 if (copied >= num)
554 break;
555
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 bacpy(&info->bdaddr, &data->bdaddr);
557 info->pscan_rep_mode = data->pscan_rep_mode;
558 info->pscan_period_mode = data->pscan_period_mode;
559 info->pscan_mode = data->pscan_mode;
560 memcpy(info->dev_class, data->dev_class, 3);
561 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200562
Linus Torvalds1da177e2005-04-16 15:20:36 -0700563 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200564 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 }
566
567 BT_DBG("cache %p, copied %d", cache, copied);
568 return copied;
569}
570
571static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572{
573 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
574 struct hci_cp_inquiry cp;
575
576 BT_DBG("%s", hdev->name);
577
578 if (test_bit(HCI_INQUIRY, &hdev->flags))
579 return;
580
581 /* Start Inquiry */
582 memcpy(&cp.lap, &ir->lap, 3);
583 cp.length = ir->length;
584 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200585 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586}
587
588int hci_inquiry(void __user *arg)
589{
590 __u8 __user *ptr = arg;
591 struct hci_inquiry_req ir;
592 struct hci_dev *hdev;
593 int err = 0, do_inquiry = 0, max_rsp;
594 long timeo;
595 __u8 *buf;
596
597 if (copy_from_user(&ir, ptr, sizeof(ir)))
598 return -EFAULT;
599
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200600 hdev = hci_dev_get(ir.dev_id);
601 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602 return -ENODEV;
603
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300604 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900605 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200606 inquiry_cache_empty(hdev) ||
607 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608 inquiry_cache_flush(hdev);
609 do_inquiry = 1;
610 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300611 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612
Marcel Holtmann04837f62006-07-03 10:02:33 +0200613 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200614
615 if (do_inquiry) {
616 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
617 if (err < 0)
618 goto done;
619 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620
621 /* for unlimited number of responses we will use buffer with 255 entries */
622 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
623
624 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
625 * copy it to the user space.
626 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100627 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200628 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 err = -ENOMEM;
630 goto done;
631 }
632
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300633 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300635 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636
637 BT_DBG("num_rsp %d", ir.num_rsp);
638
639 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
640 ptr += sizeof(ir);
641 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
642 ir.num_rsp))
643 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900644 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645 err = -EFAULT;
646
647 kfree(buf);
648
649done:
650 hci_dev_put(hdev);
651 return err;
652}
653
654/* ---- HCI ioctl helpers ---- */
655
656int hci_dev_open(__u16 dev)
657{
658 struct hci_dev *hdev;
659 int ret = 0;
660
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200661 hdev = hci_dev_get(dev);
662 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 return -ENODEV;
664
665 BT_DBG("%s %p", hdev->name, hdev);
666
667 hci_req_lock(hdev);
668
Johan Hovold94324962012-03-15 14:48:41 +0100669 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
670 ret = -ENODEV;
671 goto done;
672 }
673
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200674 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
675 ret = -ERFKILL;
676 goto done;
677 }
678
Linus Torvalds1da177e2005-04-16 15:20:36 -0700679 if (test_bit(HCI_UP, &hdev->flags)) {
680 ret = -EALREADY;
681 goto done;
682 }
683
684 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
685 set_bit(HCI_RAW, &hdev->flags);
686
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200687 /* Treat all non BR/EDR controllers as raw devices if
688 enable_hs is not set */
689 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100690 set_bit(HCI_RAW, &hdev->flags);
691
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 if (hdev->open(hdev)) {
693 ret = -EIO;
694 goto done;
695 }
696
697 if (!test_bit(HCI_RAW, &hdev->flags)) {
698 atomic_set(&hdev->cmd_cnt, 1);
699 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200700 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701
Marcel Holtmann04837f62006-07-03 10:02:33 +0200702 ret = __hci_request(hdev, hci_init_req, 0,
703 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704
Andre Guedeseead27d2011-06-30 19:20:55 -0300705 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300706 ret = __hci_request(hdev, hci_le_init_req, 0,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
708
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 clear_bit(HCI_INIT, &hdev->flags);
710 }
711
712 if (!ret) {
713 hci_dev_hold(hdev);
714 set_bit(HCI_UP, &hdev->flags);
715 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200716 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300717 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200718 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300719 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200720 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900721 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200723 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200724 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400725 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726
727 skb_queue_purge(&hdev->cmd_q);
728 skb_queue_purge(&hdev->rx_q);
729
730 if (hdev->flush)
731 hdev->flush(hdev);
732
733 if (hdev->sent_cmd) {
734 kfree_skb(hdev->sent_cmd);
735 hdev->sent_cmd = NULL;
736 }
737
738 hdev->close(hdev);
739 hdev->flags = 0;
740 }
741
742done:
743 hci_req_unlock(hdev);
744 hci_dev_put(hdev);
745 return ret;
746}
747
748static int hci_dev_do_close(struct hci_dev *hdev)
749{
750 BT_DBG("%s %p", hdev->name, hdev);
751
Andre Guedes28b75a82012-02-03 17:48:00 -0300752 cancel_work_sync(&hdev->le_scan);
753
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 hci_req_cancel(hdev, ENODEV);
755 hci_req_lock(hdev);
756
757 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300758 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 hci_req_unlock(hdev);
760 return 0;
761 }
762
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200763 /* Flush RX and TX works */
764 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400765 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200767 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200768 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200769 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200770 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200771 }
772
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200773 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200774 cancel_delayed_work(&hdev->service_cache);
775
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300776 cancel_delayed_work_sync(&hdev->le_scan_disable);
777
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300778 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779 inquiry_cache_flush(hdev);
780 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300781 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782
783 hci_notify(hdev, HCI_DEV_DOWN);
784
785 if (hdev->flush)
786 hdev->flush(hdev);
787
788 /* Reset device */
789 skb_queue_purge(&hdev->cmd_q);
790 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200791 if (!test_bit(HCI_RAW, &hdev->flags) &&
792 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200794 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200795 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 clear_bit(HCI_INIT, &hdev->flags);
797 }
798
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200799 /* flush cmd work */
800 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801
802 /* Drop queues */
803 skb_queue_purge(&hdev->rx_q);
804 skb_queue_purge(&hdev->cmd_q);
805 skb_queue_purge(&hdev->raw_q);
806
807 /* Drop last sent command */
808 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300809 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 kfree_skb(hdev->sent_cmd);
811 hdev->sent_cmd = NULL;
812 }
813
814 /* After this point our queues are empty
815 * and no tasks are scheduled. */
816 hdev->close(hdev);
817
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100818 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819 hci_dev_lock(hdev);
820 mgmt_powered(hdev, 0);
821 hci_dev_unlock(hdev);
822 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200823
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824 /* Clear flags */
825 hdev->flags = 0;
826
Johan Hedberge59fda82012-02-22 18:11:53 +0200827 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200828 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200829
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 hci_req_unlock(hdev);
831
832 hci_dev_put(hdev);
833 return 0;
834}
835
/* Userspace-facing HCI device close: resolve the device id, cancel any
 * pending auto-power-off work and bring the device down via
 * hci_dev_do_close().  Returns 0 or a negative errno. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* An explicit close supersedes the delayed auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);	/* balance hci_dev_get() above */
	return err;
}
853
/* Soft-reset a running HCI device: drop pending traffic, flush the
 * inquiry cache and connection hash, reset flow-control counters and,
 * unless the device is in raw mode, issue an HCI_Reset command. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command flow control and clear all packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
892
893int hci_dev_reset_stat(__u16 dev)
894{
895 struct hci_dev *hdev;
896 int ret = 0;
897
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200898 hdev = hci_dev_get(dev);
899 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 return -ENODEV;
901
902 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
903
904 hci_dev_put(hdev);
905
906 return ret;
907}
908
/* Handle the device-specific HCISET* ioctls coming from hci_sock.
 * Authentication/encryption/scan/link-policy settings are applied by
 * issuing the corresponding HCI request; link-mode, packet-type and
 * MTU settings are updated directly on the hci_dev. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU in the high 16 bits and the
		 * packet count in the low 16 bits */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
983
/* HCIGETDEVLIST ioctl: copy the id and flags of up to dev_num
 * registered devices to userspace.  Walking the list also updates the
 * legacy-interface state of each device (cancel auto-off, default to
 * pairable when not mgmt-controlled). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the allocation below stays within two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access keeps the device powered */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed through mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back what was actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1030
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for the
 * requested device and copy it back to userspace. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access keeps the device powered */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed through mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Transport bus in the low nibble, device type in the high nibble */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1072
1073/* ---- Interface to HCI drivers ---- */
1074
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001075static int hci_rfkill_set_block(void *data, bool blocked)
1076{
1077 struct hci_dev *hdev = data;
1078
1079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1080
1081 if (!blocked)
1082 return 0;
1083
1084 hci_dev_do_close(hdev);
1085
1086 return 0;
1087}
1088
/* Only block transitions are of interest; see hci_rfkill_set_block(). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1092
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Set up the embedded struct device; the memory is released
	 * through its release callback (see hci_free_dev()). */
	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1108
/* Free HCI device allocated by hci_alloc_dev() */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1118
/* Deferred power-on work.  If the device was brought up only for
 * auto-off probing, schedule the delayed power-off; if it was still in
 * SETUP, announce the finished controller to mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1135
/* Delayed auto power-off work: bring the device fully down. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1145
/* Delayed work ending a timed discoverable period: switch back to page
 * scan only and clear the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1163
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001164int hci_uuids_clear(struct hci_dev *hdev)
1165{
1166 struct list_head *p, *n;
1167
1168 list_for_each_safe(p, n, &hdev->uuids) {
1169 struct bt_uuid *uuid;
1170
1171 uuid = list_entry(p, struct bt_uuid, list);
1172
1173 list_del(p);
1174 kfree(uuid);
1175 }
1176
1177 return 0;
1178}
1179
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001180int hci_link_keys_clear(struct hci_dev *hdev)
1181{
1182 struct list_head *p, *n;
1183
1184 list_for_each_safe(p, n, &hdev->link_keys) {
1185 struct link_key *key;
1186
1187 key = list_entry(p, struct link_key, list);
1188
1189 list_del(p);
1190 kfree(key);
1191 }
1192
1193 return 0;
1194}
1195
/* Remove and free all stored SMP long term keys.  Always returns 0. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1207
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001208struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1209{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001210 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001211
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001212 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001213 if (bacmp(bdaddr, &k->bdaddr) == 0)
1214 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001215
1216 return NULL;
1217}
1218
/* Decide whether a newly created link key should be stored persistently
 * (and reported to userspace as such) or kept only for the current
 * connection, based on the key type and the bonding requirements both
 * sides advertised. */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
					u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1254
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001255struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001256{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001257 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001258
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259 list_for_each_entry(k, &hdev->long_term_keys, list) {
1260 if (k->ediv != ediv ||
1261 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001262 continue;
1263
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001264 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001265 }
1266
1267 return NULL;
1268}
1269EXPORT_SYMBOL(hci_find_ltk);
1270
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001271struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001272 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001273{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001274 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001275
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001276 list_for_each_entry(k, &hdev->long_term_keys, list)
1277 if (addr_type == k->bdaddr_type &&
1278 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001279 return k;
1280
1281 return NULL;
1282}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001283EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001284
/* Store (or update) the BR/EDR link key for @bdaddr.  When @new_key is
 * set the key is reported to mgmt and then removed again unless
 * hci_persistent_key() approves keeping it beyond this connection. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are reported but not kept in the list */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1340
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 * Keys of other types are silently ignored.  Only genuine LTKs are
 * reported to mgmt, and only when @new_key is set. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1377
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001378int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1379{
1380 struct link_key *key;
1381
1382 key = hci_find_link_key(hdev, bdaddr);
1383 if (!key)
1384 return -ENOENT;
1385
1386 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1387
1388 list_del(&key->list);
1389 kfree(key);
1390
1391 return 0;
1392}
1393
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001394int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1395{
1396 struct smp_ltk *k, *tmp;
1397
1398 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1399 if (bacmp(bdaddr, &k->bdaddr))
1400 continue;
1401
1402 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1403
1404 list_del(&k->list);
1405 kfree(k);
1406 }
1407
1408 return 0;
1409}
1410
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	/* The controller failed to answer the outstanding command in
	 * time; re-arm command flow control so the queue makes progress. */
	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1420
Szymon Janc2763eda2011-03-22 13:12:22 +01001421struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001422 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001423{
1424 struct oob_data *data;
1425
1426 list_for_each_entry(data, &hdev->remote_oob_data, list)
1427 if (bacmp(bdaddr, &data->bdaddr) == 0)
1428 return data;
1429
1430 return NULL;
1431}
1432
1433int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1434{
1435 struct oob_data *data;
1436
1437 data = hci_find_remote_oob_data(hdev, bdaddr);
1438 if (!data)
1439 return -ENOENT;
1440
1441 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1442
1443 list_del(&data->list);
1444 kfree(data);
1445
1446 return 0;
1447}
1448
1449int hci_remote_oob_data_clear(struct hci_dev *hdev)
1450{
1451 struct oob_data *data, *n;
1452
1453 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1454 list_del(&data->list);
1455 kfree(data);
1456 }
1457
1458 return 0;
1459}
1460
/* Store (or refresh) the remote OOB hash/randomizer pair received out
 * of band for @bdaddr, for use during secure simple pairing. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* hash and randomizer are overwritten for new and old entries alike */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1484
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001485struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001487 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001488
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001489 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490 if (bacmp(bdaddr, &b->bdaddr) == 0)
1491 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001492
1493 return NULL;
1494}
1495
1496int hci_blacklist_clear(struct hci_dev *hdev)
1497{
1498 struct list_head *p, *n;
1499
1500 list_for_each_safe(p, n, &hdev->blacklist) {
1501 struct bdaddr_list *b;
1502
1503 b = list_entry(p, struct bdaddr_list, list);
1504
1505 list_del(p);
1506 kfree(b);
1507 }
1508
1509 return 0;
1510}
1511
/* Add @bdaddr to the reject list.  BDADDR_ANY is invalid and
 * duplicates are refused; mgmt is notified on success and its result
 * is returned. */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1532
/* Remove @bdaddr from the reject list.  BDADDR_ANY clears the whole
 * list; for a single removal mgmt is notified and its result returned. */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1549
/* Delayed work dropping all cached LE advertising entries. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1561
/* Free every cached advertising entry.  Always returns 0. */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1575
1576struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1577{
1578 struct adv_entry *entry;
1579
1580 list_for_each_entry(entry, &hdev->adv_entries, list)
1581 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1582 return entry;
1583
1584 return NULL;
1585}
1586
1587static inline int is_connectable_adv(u8 evt_type)
1588{
1589 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1590 return 1;
1591
1592 return 0;
1593}
1594
1595int hci_add_adv_entry(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001596 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001597 return -EINVAL;
1598
1599 /* Only new entries should be added to adv_entries. So, if
1600 * bdaddr was found, don't add it. */
1601 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1602 return 0;
1603
Andre Guedes4777bfd2012-01-30 23:31:28 -03001604 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001605 if (!entry)
1606 return -ENOMEM;
1607
1608 bacpy(&entry->bdaddr, &ev->bdaddr);
1609 entry->bdaddr_type = ev->bdaddr_type;
1610
1611 list_add(&entry->list, &hdev->adv_entries);
1612
1613 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1614 batostr(&entry->bdaddr), entry->bdaddr_type);
1615
1616 return 0;
1617}
1618
/* HCI request callback: program the LE scan type/interval/window
 * carried in @opt (a struct le_scan_params pointer). */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1631
/* HCI request callback: turn LE scanning on (filter_dup stays 0 from
 * the memset). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1641
/* Synchronously start an LE scan with the given parameters and arm the
 * delayed work that stops it after @timeout ms.  Returns -EINPROGRESS
 * when a scan is already active, or the request error. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-command wait */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1675
/* Delayed work scheduled by hci_do_le_scan(): disable LE scanning once
 * the requested scan duration has elapsed. The command parameter block
 * is only memset to zero, so cp.enable = 0, which tells the controller
 * to stop scanning.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	/* All-zero cp => enable = 0 => disable scanning */
	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1688
Andre Guedes28b75a82012-02-03 17:48:00 -03001689static void le_scan_work(struct work_struct *work)
1690{
1691 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1692 struct le_scan_params *param = &hdev->le_scan_params;
1693
1694 BT_DBG("%s", hdev->name);
1695
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001696 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1697 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001698}
1699
/* Start an LE scan asynchronously: save the scan parameters in the
 * device and hand the actual work off to system_long_wq (the scan
 * itself blocks, see le_scan_work/hci_do_le_scan).
 *
 * Returns 0 if the work was queued, -EINPROGRESS if a scan work item
 * is already pending or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	/* Only one scan at a time; the params below are a single shared
	 * slot in hdev, so refuse while the work item is in flight. */
	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq because the scan request blocks for a while */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1719
/* Register HCI device */
/*
 * Allocate an index, initialise all per-device state (queues, work items,
 * lists, timers), create the per-device workqueue and sysfs entries, and
 * kick off the power-on sequence.
 *
 * Returns the assigned device id (>= 0) on success or a negative errno.
 * On failure the device is removed from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must provide at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after 'head' so the list stays sorted by id */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default state and capabilities */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	/* Sniff-mode defaults (in 0.625 ms slots) */
	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/CMD/TX processing work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands that never get a reply */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	/* Deferred/periodic work */
	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure leaves rfkill NULL */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Mark the device as being set up and auto-power it on; the
	 * power_on work will clear these as appropriate. */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1851
/* Unregister HCI device */
/*
 * Tear down a registered device: remove it from the global list, close
 * it, notify mgmt and user space, release sysfs/rfkill/workqueue and
 * flush all cached per-device data. Drops the reference taken by
 * hci_register_dev(); the device is freed when the last reference goes.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Let concurrent paths know the device is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt the index is gone, unless the device never finished
	 * its setup phase (mgmt never saw it in that case). */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Flush all cached device data under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference held since registration */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1906
/* Suspend HCI device */
/* Notify interested parties that the device is suspending; no device
 * state is changed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1914
/* Resume HCI device */
/* Counterpart of hci_suspend_dev(): broadcast the resume notification.
 * Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1922
/* Receive frame from HCI drivers */
/*
 * Entry point for drivers handing a complete frame up the stack. The
 * skb's dev field must point at the originating hci_dev. The frame is
 * timestamped, queued on the device RX queue and processed from
 * rx_work. Consumes the skb in all cases.
 *
 * Returns 0 on success, -ENXIO if there is no device or it is neither
 * up nor initialising.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1945
/*
 * Incrementally reassemble one HCI packet of @type from a byte stream.
 * @data/@count is the next chunk of input; @index selects which slot in
 * hdev->reassembly[] holds the partial packet. The per-skb control
 * block's 'expect' field tracks how many bytes are still needed (first
 * the header, then the payload length read from the header).
 *
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or if the
 * advertised payload would not fit the preallocated buffer. A fully
 * reassembled packet is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer big enough for
		 * the largest packet of this type and expect the header
		 * first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current stage still expects */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, learn the payload length
		 * from it and verify it fits in the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2054
/*
 * Feed a fragment of a typed HCI packet into the reassembly machinery.
 * Repeats hci_reassembly() until the input is consumed (one call may
 * complete a packet and leave bytes of the next one). Uses slot
 * 'type - 1' so each packet type reassembles independently.
 *
 * Returns the number of bytes left unconsumed (0 on full consumption)
 * or a negative error from hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past what was consumed and loop on the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2074
/* Reassembly slot used for typeless (streamed) input, where the packet
 * type indicator is inline as the first byte of each packet. */
#define STREAM_REASSEMBLY 0

/*
 * Feed raw stream data (e.g. from a UART transport) into reassembly.
 * When no packet is in progress the first byte of the stream is the
 * packet type; otherwise the type of the in-progress skb is reused.
 *
 * Returns the number of bytes left unconsumed or a negative error from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2109
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110/* ---- Interface to upper protocols ---- */
2111
/* Register an upper-protocol callback structure on the global hci_cb
 * list, protected by hci_cb_list_lock. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2123
/* Remove a callback structure previously added with hci_register_cb().
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2135
/*
 * Hand one outgoing frame to the driver. Timestamps the skb, mirrors a
 * copy to the monitor channel (and to raw sockets when in promiscuous
 * mode), then passes ownership to the driver's send() callback.
 *
 * Returns the driver's send() result, or -ENODEV if skb->dev is not a
 * device (skb is freed in that case).
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2163
/* Send HCI command */
/*
 * Build an HCI command packet (header + @plen bytes of @param) and
 * queue it on the device command queue; cmd_work does the actual send.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	/* NOTE(review): plen is __u32 but the wire header field is one
	 * byte; callers presumably never pass > 255 — worth confirming. */
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last command so the init sequence can
	 * track replies. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
/* Get data from the previously sent command */
/*
 * Return a pointer to the parameter bytes of the last sent command if
 * its opcode matches @opcode, or NULL if no command was sent or the
 * opcode differs. The returned pointer aliases hdev->sent_cmd.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	/* Skip the command header to get at the parameters */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2217
2218/* Send ACL data */
2219static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2220{
2221 struct hci_acl_hdr *hdr;
2222 int len = skb->len;
2223
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002224 skb_push(skb, HCI_ACL_HDR_SIZE);
2225 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002226 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002227 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2228 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229}
2230
/*
 * Queue an ACL skb (and any fragments hanging off its frag_list) on
 * @queue. The head fragment already carries its ACL header; the callers
 * add it via hci_add_acl_hdr(). Continuation fragments get headers here
 * with ACL_START replaced by ACL_CONT, and all fragments are queued
 * under the queue lock so they stay contiguous.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; fragments are queued as
		 * individual skbs below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments use ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2271
/*
 * Send ACL data on @chan: stamp the skb with the device and packet
 * type, add the ACL header for the channel's connection, queue it on
 * the channel data queue and kick the TX work.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2288
/* Send SCO data */
/*
 * Build the SCO header in a local struct, push it onto the skb, then
 * queue the packet on the connection's data queue and schedule TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Make room for the header and copy it in front of the payload */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2311
2312/* ---- HCI TX task (outgoing data) ---- */
2313
/* HCI Connection scheduler */
/*
 * Pick the connection of @type with pending data and the fewest
 * in-flight packets (c->sent), and compute its fair-share quota from
 * the controller's free buffer count for that link type. *quote is set
 * to the quota (at least 1 when a connection is chosen, 0 otherwise).
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* le_mtu == 0 means no dedicated LE buffers:
			 * LE traffic shares the ACL buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the free buffers evenly; always allow at least 1 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2373
/*
 * TX timeout handler for links of @type: disconnect every connection
 * of that type that still has unacknowledged packets (c->sent != 0).
 * 0x13 is the disconnect reason sent to the remote (remote user
 * terminated connection).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2394
/*
 * Channel-level scheduler: among all channels on connections of @type
 * that have queued data, pick one whose head skb has the highest
 * priority, breaking ties by the fewest packets in flight on its
 * connection. *quote gets the fair share of free controller buffers
 * (minimum 1). Returns NULL if no channel is eligible.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the priority of the head skb matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the selection at
			 * this priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on least-busy connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffers for the chosen channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2473
/* Anti-starvation pass, run after a scheduling round that actually sent
 * data (see hci_sched_acl_pkt/blk and hci_sched_le).  Channels that got
 * to transmit simply have their per-round counter cleared; channels
 * that were skipped but still have queued data get their head skb
 * promoted to just below the maximum priority so they win a later
 * round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: reset its
			 * counter and leave its priority untouched */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: boost its head skb so it is
			 * preferred by the next hci_chan_sent() pass */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
				skb->priority);
		}

		/* Stop early once every connection of this type has
		 * been visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2523
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002524static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2525{
2526 /* Calculate count of blocks used by this packet */
2527 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2528}
2529
/* Watchdog for the ACL transmit path: if the controller has reported no
 * free buffer credit (@cnt == 0) since the last ACL transmission for
 * longer than HCI_ACL_TX_TIMEOUT, declare the link stalled and let
 * hci_link_tx_to() recover.  Skipped entirely for raw-mode devices. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540
/* ACL scheduler for packet-based flow control: repeatedly ask
 * hci_chan_sent() for the best channel and drain up to its quota of
 * packets, stopping early if a lower-priority skb surfaces.  Each sent
 * packet consumes one acl_cnt credit (returned asynchronously via
 * Number Of Completed Packets events elsewhere). */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	/* Snapshot to detect below whether anything was sent */
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities so skipped
	 * channels are not starved */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2578
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002579static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2580{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002581 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002582 struct hci_chan *chan;
2583 struct sk_buff *skb;
2584 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002585
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002586 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002587
2588 while (hdev->block_cnt > 0 &&
2589 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2590 u32 priority = (skb_peek(&chan->data_q))->priority;
2591 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2592 int blocks;
2593
2594 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2595 skb->len, skb->priority);
2596
2597 /* Stop if priority has changed */
2598 if (skb->priority < priority)
2599 break;
2600
2601 skb = skb_dequeue(&chan->data_q);
2602
2603 blocks = __get_blocks(hdev, skb);
2604 if (blocks > hdev->block_cnt)
2605 return;
2606
2607 hci_conn_enter_active_mode(chan->conn,
2608 bt_cb(skb)->force_active);
2609
2610 hci_send_frame(skb);
2611 hdev->acl_last_tx = jiffies;
2612
2613 hdev->block_cnt -= blocks;
2614 quote -= blocks;
2615
2616 chan->sent += blocks;
2617 chan->conn->sent += blocks;
2618 }
2619 }
2620
2621 if (cnt != hdev->block_cnt)
2622 hci_prio_recalculate(hdev, ACL_LINK);
2623}
2624
2625static inline void hci_sched_acl(struct hci_dev *hdev)
2626{
2627 BT_DBG("%s", hdev->name);
2628
2629 if (!hci_conn_num(hdev, ACL_LINK))
2630 return;
2631
2632 switch (hdev->flow_ctl_mode) {
2633 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2634 hci_sched_acl_pkt(hdev);
2635 break;
2636
2637 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2638 hci_sched_acl_blk(hdev);
2639 break;
2640 }
2641}
2642
/* Schedule SCO: drain queued SCO data while the controller has free
 * SCO buffer credit.  Connection selection is delegated to
 * hci_low_sent() (defined elsewhere in this file). */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter before it overflows */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2666
/* Schedule eSCO: identical to hci_sched_sco() but for ESCO_LINK
 * connections; eSCO shares the SCO buffer credit pool (sco_cnt). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter before it overflows */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2689
/* LE scheduler: same channel-based scheme as hci_sched_acl_pkt(), but
 * credit comes from the LE buffer pool when the controller has one
 * (le_pkts != 0) and from the shared ACL pool otherwise. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* le_pkts == 0: no dedicated LE buffers, borrow ACL credit */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credit back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities so skipped
	 * channels are not starved */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2740
/* TX work item: run every per-link-type scheduler in turn, then push
 * any queued raw (unknown type) packets straight to the driver,
 * bypassing flow control. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2763
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002764/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
2766/* ACL data packet */
2767static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2768{
2769 struct hci_acl_hdr *hdr = (void *) skb->data;
2770 struct hci_conn *conn;
2771 __u16 handle, flags;
2772
2773 skb_pull(skb, HCI_ACL_HDR_SIZE);
2774
2775 handle = __le16_to_cpu(hdr->handle);
2776 flags = hci_flags(handle);
2777 handle = hci_handle(handle);
2778
2779 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2780
2781 hdev->stat.acl_rx++;
2782
2783 hci_dev_lock(hdev);
2784 conn = hci_conn_hash_lookup_handle(hdev, handle);
2785 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002786
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002788 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002789
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002791 l2cap_recv_acldata(conn, skb, flags);
2792 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002794 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 hdev->name, handle);
2796 }
2797
2798 kfree_skb(skb);
2799}
2800
2801/* SCO data packet */
2802static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2803{
2804 struct hci_sco_hdr *hdr = (void *) skb->data;
2805 struct hci_conn *conn;
2806 __u16 handle;
2807
2808 skb_pull(skb, HCI_SCO_HDR_SIZE);
2809
2810 handle = __le16_to_cpu(hdr->handle);
2811
2812 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2813
2814 hdev->stat.sco_rx++;
2815
2816 hci_dev_lock(hdev);
2817 conn = hci_conn_hash_lookup_handle(hdev, handle);
2818 hci_dev_unlock(hdev);
2819
2820 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002822 sco_recv_scodata(conn, skb);
2823 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002825 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 hdev->name, handle);
2827 }
2828
2829 kfree_skb(skb);
2830}
2831
/* RX work item: drain the receive queue.  Every packet is first copied
 * to the monitor socket (and, in promiscuous mode, to the HCI
 * sockets), then either dropped (raw mode / data packets during init)
 * or dispatched by packet type to the event, ACL or SCO handler. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, the stack does
		 * not process anything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events must still go through for init to
			 * complete */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2886
/* Command work item: send the next queued HCI command, one at a time.
 * cmd_cnt gates transmission (the controller allows a limited number of
 * outstanding commands); a clone of the sent command is kept in
 * hdev->sent_cmd so the completion path can match the reply. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command (if any) before
		 * replacing it */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (out of memory): put the command
			 * back at the head and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002917
2918int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2919{
2920 /* General inquiry access code (GIAC) */
2921 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2922 struct hci_cp_inquiry cp;
2923
2924 BT_DBG("%s", hdev->name);
2925
2926 if (test_bit(HCI_INQUIRY, &hdev->flags))
2927 return -EINPROGRESS;
2928
Johan Hedberg46632622012-01-02 16:06:08 +02002929 inquiry_cache_flush(hdev);
2930
Andre Guedes2519a1f2011-11-07 11:45:24 -03002931 memset(&cp, 0, sizeof(cp));
2932 memcpy(&cp.lap, lap, sizeof(cp.lap));
2933 cp.length = length;
2934
2935 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2936}
Andre Guedes023d5042011-11-04 14:16:52 -03002937
2938int hci_cancel_inquiry(struct hci_dev *hdev)
2939{
2940 BT_DBG("%s", hdev->name);
2941
2942 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2943 return -EPERM;
2944
2945 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2946}