blob: b5ba42db0561809910d65ad3f4ebdfb1a1e13e5d [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020058int enable_hs;
59
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -020064static DEFINE_MUTEX(hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
66/* HCI device list */
67LIST_HEAD(hci_dev_list);
68DEFINE_RWLOCK(hci_dev_list_lock);
69
70/* HCI callback list */
71LIST_HEAD(hci_cb_list);
72DEFINE_RWLOCK(hci_cb_list_lock);
73
74/* HCI protocols */
75#define HCI_MAX_PROTO 2
76struct hci_proto *hci_proto[HCI_MAX_PROTO];
77
78/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080079static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080
81/* ---- HCI notifications ---- */
82
83int hci_register_notifier(struct notifier_block *nb)
84{
Alan Sterne041c682006-03-27 01:16:30 -080085 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070086}
87
88int hci_unregister_notifier(struct notifier_block *nb)
89{
Alan Sterne041c682006-03-27 01:16:30 -080090 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091}
92
Marcel Holtmann65164552005-10-28 19:20:48 +020093static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070094{
Alan Sterne041c682006-03-27 01:16:30 -080095 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070096}
97
98/* ---- HCI requests ---- */
99
Johan Hedberg23bb5762010-12-21 23:01:27 +0200100void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103
Johan Hedberga5040ef2011-01-10 13:28:59 +0200104 /* If this is the init phase check if the completed command matches
105 * the last init command, and if not just return.
106 */
107 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200108 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = result;
112 hdev->req_status = HCI_REQ_DONE;
113 wake_up_interruptible(&hdev->req_wait_q);
114 }
115}
116
117static void hci_req_cancel(struct hci_dev *hdev, int err)
118{
119 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120
121 if (hdev->req_status == HCI_REQ_PEND) {
122 hdev->req_result = err;
123 hdev->req_status = HCI_REQ_CANCELED;
124 wake_up_interruptible(&hdev->req_wait_q);
125 }
126}
127
128/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps up to @timeout
 * jiffies until hci_req_complete()/hci_req_cancel() settles the
 * request.  Caller must hold the req lock (see hci_request()).
 * Returns 0 or a negative errno; -EINTR if interrupted by a signal,
 * -ETIMEDOUT if no completion arrived in time.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Enqueue on the wait queue and mark INTERRUPTIBLE *before*
	 * issuing the request, so a completion that fires immediately
	 * cannot be lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
170
171static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100172 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173{
174 int ret;
175
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200176 if (!test_bit(HCI_UP, &hdev->flags))
177 return -ENETDOWN;
178
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179 /* Serialize all requests */
180 hci_req_lock(hdev);
181 ret = __hci_request(hdev, req, opt, timeout);
182 hci_req_unlock(hdev);
183
184 return ret;
185}
186
187static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
188{
189 BT_DBG("%s %ld", hdev->name, opt);
190
191 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300192 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200193 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194}
195
/* Request handler: queue the full controller bring-up sequence.
 * Runs under HCI_INIT (see hci_dev_open()); the command order below is
 * meaningful — a reset must precede the reads, and hci_req_complete()
 * matches against the last command queued.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain any driver-provided pre-init commands
	 * into the command queue first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot tolerate it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Wipe all link keys stored in the controller; the host
	 * (mgmt/user space) is the authority for keys. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
260
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300261static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
262{
263 BT_DBG("%s", hdev->name);
264
265 /* Read LE buffer size */
266 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
267}
268
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
270{
271 __u8 scan = opt;
272
273 BT_DBG("%s %x", hdev->name, scan);
274
275 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200276 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277}
278
279static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 auth = opt;
282
283 BT_DBG("%s %x", hdev->name, auth);
284
285 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200286 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
289static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 encrypt = opt;
292
293 BT_DBG("%s %x", hdev->name, encrypt);
294
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200295 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200296 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297}
298
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200299static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __le16 policy = cpu_to_le16(opt);
302
Marcel Holtmanna418b892008-11-30 12:17:28 +0100303 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200304
305 /* Default link policy */
306 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
307}
308
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900309/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310 * Device is held on return. */
311struct hci_dev *hci_dev_get(int index)
312{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200313 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314
315 BT_DBG("%d", index);
316
317 if (index < 0)
318 return NULL;
319
320 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200321 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 if (d->id == index) {
323 hdev = hci_dev_hold(d);
324 break;
325 }
326 }
327 read_unlock(&hci_dev_list_lock);
328 return hdev;
329}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330
331/* ---- Inquiry support ---- */
332static void inquiry_cache_flush(struct hci_dev *hdev)
333{
334 struct inquiry_cache *cache = &hdev->inq_cache;
335 struct inquiry_entry *next = cache->list, *e;
336
337 BT_DBG("cache %p", cache);
338
339 cache->list = NULL;
340 while ((e = next)) {
341 next = e->next;
342 kfree(e);
343 }
344}
345
346struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
347{
348 struct inquiry_cache *cache = &hdev->inq_cache;
349 struct inquiry_entry *e;
350
351 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
352
353 for (e = cache->list; e; e = e->next)
354 if (!bacmp(&e->data.bdaddr, bdaddr))
355 break;
356 return e;
357}
358
359void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
360{
361 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200362 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363
364 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
365
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200366 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
367 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200369 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
370 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200372
373 ie->next = cache->list;
374 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 }
376
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200377 memcpy(&ie->data, data, sizeof(*data));
378 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700379 cache->timestamp = jiffies;
380}
381
382static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
383{
384 struct inquiry_cache *cache = &hdev->inq_cache;
385 struct inquiry_info *info = (struct inquiry_info *) buf;
386 struct inquiry_entry *e;
387 int copied = 0;
388
389 for (e = cache->list; e && copied < num; e = e->next, copied++) {
390 struct inquiry_data *data = &e->data;
391 bacpy(&info->bdaddr, &data->bdaddr);
392 info->pscan_rep_mode = data->pscan_rep_mode;
393 info->pscan_period_mode = data->pscan_period_mode;
394 info->pscan_mode = data->pscan_mode;
395 memcpy(info->dev_class, data->dev_class, 3);
396 info->clock_offset = data->clock_offset;
397 info++;
398 }
399
400 BT_DBG("cache %p, copied %d", cache, copied);
401 return copied;
402}
403
404static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
405{
406 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
407 struct hci_cp_inquiry cp;
408
409 BT_DBG("%s", hdev->name);
410
411 if (test_bit(HCI_INQUIRY, &hdev->flags))
412 return;
413
414 /* Start Inquiry */
415 memcpy(&cp.lap, &ir->lap, 3);
416 cp.length = ir->length;
417 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200418 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419}
420
/* HCIINQUIRY ioctl backend: run (or reuse a fresh cache of) an inquiry
 * and copy the results to user space after the updated request header.
 * Returns 0 or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the dev lock whether the cache is stale (or a
	 * flush was requested) and a fresh inquiry is needed. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28 s units; 2000 ms per unit gives headroom. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp) followed by the
	 * inquiry_info records. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
486
487/* ---- HCI ioctl helpers ---- */
488
/* Bring an HCI device up: open the transport, run the init command
 * sequence (unless the device is raw), and announce HCI_DEV_UP.
 * On init failure all queues/works are drained and the transport is
 * closed again.  Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* One command credit until the controller reports more. */
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): this overwrites ret from the BR/EDR init;
		 * a BR/EDR init failure can be masked by LE init success. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt announces power state itself. */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
575
/* Tear an HCI device down: cancel pending work, flush caches and
 * connections, reset the controller, drain every queue, and close the
 * transport.  The teardown order below matters — works are flushed
 * before queues are purged, and the reset runs before cmd_q is finally
 * emptied.  Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
655
656int hci_dev_close(__u16 dev)
657{
658 struct hci_dev *hdev;
659 int err;
660
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200661 hdev = hci_dev_get(dev);
662 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 return -ENODEV;
664 err = hci_dev_do_close(hdev);
665 hci_dev_put(hdev);
666 return err;
667}
668
/* HCIDEVRESET ioctl backend: drop all queued traffic, flush caches and
 * connections, and issue an HCI Reset (unless the device is raw).
 * No-op (success) when the device is not up.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control state: one command credit, no packets
	 * outstanding on any transport. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
707
708int hci_dev_reset_stat(__u16 dev)
709{
710 struct hci_dev *hdev;
711 int ret = 0;
712
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200713 hdev = hci_dev_get(dev);
714 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715 return -ENODEV;
716
717 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
718
719 hci_dev_put(hdev);
720
721 return ret;
722}
723
/* Dispatch the HCISET* family of ioctls.  @arg points to a
 * struct hci_dev_req whose dev_opt carries the per-command payload.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: low half = pkt count,
		 * high half = MTU (host byte order). */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
798
/* HCIGETDEVLIST ioctl backend: copy (id, flags) for up to dev_num
 * registered devices to user space.  Also cancels any pending
 * auto-power-off and marks non-mgmt devices pairable as a side effect
 * of being enumerated by a legacy (raw ioctl) user.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation: at most two pages worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
845
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot
 * for the device id passed in by user space.
 *
 * Returns 0 on success, -EFAULT on copy failure, -ENODEV if the id
 * does not name a registered device.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference; released via hci_dev_put() below */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A userspace query counts as activity: cancel the pending
	 * auto power-off (sync variant is safe, no locks held) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type into the low nibble, device type into the high */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
887
888/* ---- Interface to HCI drivers ---- */
889
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200890static int hci_rfkill_set_block(void *data, bool blocked)
891{
892 struct hci_dev *hdev = data;
893
894 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
895
896 if (!blocked)
897 return 0;
898
899 hci_dev_do_close(hdev);
900
901 return 0;
902}
903
/* rfkill operations table: only blocking is handled; unblock requires
 * user space to reopen the device (see hci_rfkill_set_block) */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
907
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908/* Alloc HCI device */
909struct hci_dev *hci_alloc_dev(void)
910{
911 struct hci_dev *hdev;
912
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200913 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 if (!hdev)
915 return NULL;
916
David Herrmann0ac7e702011-10-08 14:58:47 +0200917 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918 skb_queue_head_init(&hdev->driver_init);
919
920 return hdev;
921}
922EXPORT_SYMBOL(hci_alloc_dev);
923
/* Free HCI device */
/* Counterpart of hci_alloc_dev(). Drops the embedded device reference;
 * the actual kfree happens in the device release callback, not here. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
933
/* Deferred power-on work, scheduled from hci_register_dev(). Opens the
 * device; if it is still flagged for automatic shutdown, arms the
 * delayed power-off, and signals mgmt once initial setup is done. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* If the device cannot be opened, give up silently */
	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off: power back down unless userspace shows interest
	 * within AUTO_OFF_TIMEOUT ms */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First power-on after registration: announce to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
950
/* Delayed auto power-off work, armed by hci_power_on(). Clears the
 * auto-off flag and closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
962
/* Delayed work that ends a timed discoverable period: drops inquiry
 * scan (leaving page scan enabled) and resets the timeout.
 * NOTE(review): the hci_send_cmd() result is ignored — best effort. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
980
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200981int hci_uuids_clear(struct hci_dev *hdev)
982{
983 struct list_head *p, *n;
984
985 list_for_each_safe(p, n, &hdev->uuids) {
986 struct bt_uuid *uuid;
987
988 uuid = list_entry(p, struct bt_uuid, list);
989
990 list_del(p);
991 kfree(uuid);
992 }
993
994 return 0;
995}
996
Johan Hedberg55ed8ca12011-01-17 14:41:05 +0200997int hci_link_keys_clear(struct hci_dev *hdev)
998{
999 struct list_head *p, *n;
1000
1001 list_for_each_safe(p, n, &hdev->link_keys) {
1002 struct link_key *key;
1003
1004 key = list_entry(p, struct link_key, list);
1005
1006 list_del(p);
1007 kfree(key);
1008 }
1009
1010 return 0;
1011}
1012
1013struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1014{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001015 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001016
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001017 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001018 if (bacmp(bdaddr, &k->bdaddr) == 0)
1019 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001020
1021 return NULL;
1022}
1023
/* Decide whether a link key should be stored persistently (1) or kept
 * only for the lifetime of the connection (0). The checks are ordered
 * from most to least specific; conn may be NULL (security mode 3). */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1059
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001060struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1061{
1062 struct link_key *k;
1063
1064 list_for_each_entry(k, &hdev->link_keys, list) {
1065 struct key_master_id *id;
1066
1067 if (k->type != HCI_LK_SMP_LTK)
1068 continue;
1069
1070 if (k->dlen != sizeof(*id))
1071 continue;
1072
1073 id = (void *) &k->data;
1074 if (id->ediv == ediv &&
1075 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1076 return k;
1077 }
1078
1079 return NULL;
1080}
1081EXPORT_SYMBOL(hci_find_ltk);
1082
1083struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1084 bdaddr_t *bdaddr, u8 type)
1085{
1086 struct link_key *k;
1087
1088 list_for_each_entry(k, &hdev->link_keys, list)
1089 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1090 return k;
1091
1092 return NULL;
1093}
1094EXPORT_SYMBOL(hci_find_link_key_type);
1095
/* Store (or update) a link key for bdaddr. conn may be NULL. new_key
 * is non-zero when the controller just generated the key (as opposed
 * to a replay from storage); only then is mgmt notified, and a
 * non-persistent key is dropped again after the notification.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys live only long enough to tell mgmt */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1150
/* Store (or update) an SMP Long Term Key for bdaddr. The EDIV/rand
 * master id is kept in the flexible data[] tail of struct link_key.
 * Returns 0 on success or -ENOMEM.
 * NOTE(review): mgmt_new_link_key() is passed old_key_type here,
 * whereas hci_add_link_key() passes a persistent flag in the same
 * position — confirm this is intentional. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate room for the key plus its master id tail */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1188
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001189int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1190{
1191 struct link_key *key;
1192
1193 key = hci_find_link_key(hdev, bdaddr);
1194 if (!key)
1195 return -ENOENT;
1196
1197 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1198
1199 list_del(&key->list);
1200 kfree(key);
1201
1202 return 0;
1203}
1204
/* HCI command timer function */
/* Fires when the controller fails to answer a command in time.
 * Resets the command credit to 1 so the next queued command can be
 * sent, and kicks the command work. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1214
Szymon Janc2763eda2011-03-22 13:12:22 +01001215struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1216 bdaddr_t *bdaddr)
1217{
1218 struct oob_data *data;
1219
1220 list_for_each_entry(data, &hdev->remote_oob_data, list)
1221 if (bacmp(bdaddr, &data->bdaddr) == 0)
1222 return data;
1223
1224 return NULL;
1225}
1226
1227int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1228{
1229 struct oob_data *data;
1230
1231 data = hci_find_remote_oob_data(hdev, bdaddr);
1232 if (!data)
1233 return -ENOENT;
1234
1235 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1236
1237 list_del(&data->list);
1238 kfree(data);
1239
1240 return 0;
1241}
1242
1243int hci_remote_oob_data_clear(struct hci_dev *hdev)
1244{
1245 struct oob_data *data, *n;
1246
1247 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1248 list_del(&data->list);
1249 kfree(data);
1250 }
1251
1252 return 0;
1253}
1254
1255int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1256 u8 *randomizer)
1257{
1258 struct oob_data *data;
1259
1260 data = hci_find_remote_oob_data(hdev, bdaddr);
1261
1262 if (!data) {
1263 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1264 if (!data)
1265 return -ENOMEM;
1266
1267 bacpy(&data->bdaddr, bdaddr);
1268 list_add(&data->list, &hdev->remote_oob_data);
1269 }
1270
1271 memcpy(data->hash, hash, sizeof(data->hash));
1272 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1273
1274 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1275
1276 return 0;
1277}
1278
Antti Julkub2a66aa2011-06-15 12:01:14 +03001279struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1280 bdaddr_t *bdaddr)
1281{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001282 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001283
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001284 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001285 if (bacmp(bdaddr, &b->bdaddr) == 0)
1286 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001287
1288 return NULL;
1289}
1290
1291int hci_blacklist_clear(struct hci_dev *hdev)
1292{
1293 struct list_head *p, *n;
1294
1295 list_for_each_safe(p, n, &hdev->blacklist) {
1296 struct bdaddr_list *b;
1297
1298 b = list_entry(p, struct bdaddr_list, list);
1299
1300 list_del(p);
1301 kfree(b);
1302 }
1303
1304 return 0;
1305}
1306
1307int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1308{
1309 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001310
1311 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1312 return -EBADF;
1313
Antti Julku5e762442011-08-25 16:48:02 +03001314 if (hci_blacklist_lookup(hdev, bdaddr))
1315 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001316
1317 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001318 if (!entry)
1319 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001320
1321 bacpy(&entry->bdaddr, bdaddr);
1322
1323 list_add(&entry->list, &hdev->blacklist);
1324
Johan Hedberg744cf192011-11-08 20:40:14 +02001325 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001326}
1327
1328int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1329{
1330 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001331
Szymon Janc1ec918c2011-11-16 09:32:21 +01001332 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001333 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001334
1335 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001336 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001337 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001338
1339 list_del(&entry->list);
1340 kfree(entry);
1341
Johan Hedberg744cf192011-11-08 20:40:14 +02001342 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001343}
1344
/* Delayed work that expires the LE advertising cache; takes the
 * device lock around the actual clearing. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1356
Andre Guedes76c86862011-05-26 16:23:50 -03001357int hci_adv_entries_clear(struct hci_dev *hdev)
1358{
1359 struct adv_entry *entry, *tmp;
1360
1361 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1362 list_del(&entry->list);
1363 kfree(entry);
1364 }
1365
1366 BT_DBG("%s adv cache cleared", hdev->name);
1367
1368 return 0;
1369}
1370
1371struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1372{
1373 struct adv_entry *entry;
1374
1375 list_for_each_entry(entry, &hdev->adv_entries, list)
1376 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1377 return entry;
1378
1379 return NULL;
1380}
1381
1382static inline int is_connectable_adv(u8 evt_type)
1383{
1384 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1385 return 1;
1386
1387 return 0;
1388}
1389
1390int hci_add_adv_entry(struct hci_dev *hdev,
1391 struct hci_ev_le_advertising_info *ev)
1392{
1393 struct adv_entry *entry;
1394
1395 if (!is_connectable_adv(ev->evt_type))
1396 return -EINVAL;
1397
1398 /* Only new entries should be added to adv_entries. So, if
1399 * bdaddr was found, don't add it. */
1400 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1401 return 0;
1402
1403 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1404 if (!entry)
1405 return -ENOMEM;
1406
1407 bacpy(&entry->bdaddr, &ev->bdaddr);
1408 entry->bdaddr_type = ev->bdaddr_type;
1409
1410 list_add(&entry->list, &hdev->adv_entries);
1411
1412 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1413 batostr(&entry->bdaddr), entry->bdaddr_type);
1414
1415 return 0;
1416}
1417
/* Register HCI device */
/* Register a driver-allocated hci_dev with the core: assigns the first
 * free id, initializes all core state (queues, work items, caches,
 * lists), creates the per-device workqueue, sysfs entries and rfkill
 * switch, and schedules the deferred power-on.
 *
 * Returns the assigned device id (>= 0) on success or a negative
 * errno; on failure the device is unlinked from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after 'head' to keep the list sorted by id */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	/* Core defaults; drivers may adjust after registration */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Command timeout watchdog (see hci_cmd_timer) */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Dedicated single-threaded workqueue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best effort; the device works without */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on asynchronously; auto-off until userspace claims it */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1545
/* Unregister HCI device */
/* Tear down a registered hci_dev: unlink it, close it, notify mgmt
 * (unless still in init/setup), remove rfkill and sysfs, flush the
 * workqueue and free all cached state. The teardown order mirrors the
 * registration order in reverse and must not be changed casually. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb(NULL) is a no-op, so unused slots are fine */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if mgmt ever saw the device */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Wait for the adv-cache timer so it cannot fire after free */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1597
/* Suspend HCI device */
/* Notify registered HCI protocols of suspend; the device itself is
 * not closed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1605
/* Resume HCI device */
/* Notify registered HCI protocols of resume. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1613
Marcel Holtmann76bca882009-11-18 00:40:39 +01001614/* Receive frame from HCI drivers */
1615int hci_recv_frame(struct sk_buff *skb)
1616{
1617 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1618 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1619 && !test_bit(HCI_INIT, &hdev->flags))) {
1620 kfree_skb(skb);
1621 return -ENXIO;
1622 }
1623
1624 /* Incomming skb */
1625 bt_cb(skb)->incoming = 1;
1626
1627 /* Time stamp */
1628 __net_timestamp(skb);
1629
Marcel Holtmann76bca882009-11-18 00:40:39 +01001630 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001631 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001632
Marcel Holtmann76bca882009-11-18 00:40:39 +01001633 return 0;
1634}
1635EXPORT_SYMBOL(hci_recv_frame);
1636
/* Incrementally reassemble one HCI packet of the given type from a
 * driver-supplied byte stream. Partial packets are parked in
 * hdev->reassembly[index] between calls.
 *
 * Returns the number of input bytes NOT consumed (>= 0) — a complete
 * packet was handed to hci_recv_frame() when the full header+payload
 * arrived — or -EILSEQ / -ENOMEM on error.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate one sized for the
		 * worst case of this packet type; 'expect' starts at
		 * the header length */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; bail out if it exceeds the allocation */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1745
Marcel Holtmannef222012007-07-11 06:42:04 +02001746int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1747{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301748 int rem = 0;
1749
Marcel Holtmannef222012007-07-11 06:42:04 +02001750 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1751 return -EILSEQ;
1752
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001753 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001754 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301755 if (rem < 0)
1756 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001757
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301758 data += (count - rem);
1759 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001760 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001761
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301762 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001763}
1764EXPORT_SYMBOL(hci_recv_fragment);
1765
Suraj Sumangala99811512010-07-14 13:02:19 +05301766#define STREAM_REASSEMBLY 0
1767
1768int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1769{
1770 int type;
1771 int rem = 0;
1772
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001773 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301774 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1775
1776 if (!skb) {
1777 struct { char type; } *pkt;
1778
1779 /* Start of the frame */
1780 pkt = data;
1781 type = pkt->type;
1782
1783 data++;
1784 count--;
1785 } else
1786 type = bt_cb(skb)->pkt_type;
1787
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001788 rem = hci_reassembly(hdev, type, data, count,
1789 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301790 if (rem < 0)
1791 return rem;
1792
1793 data += (count - rem);
1794 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001795 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301796
1797 return rem;
1798}
1799EXPORT_SYMBOL(hci_recv_stream_fragment);
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801/* ---- Interface to upper protocols ---- */
1802
1803/* Register/Unregister protocols.
1804 * hci_task_lock is used to ensure that no tasks are running. */
1805int hci_register_proto(struct hci_proto *hp)
1806{
1807 int err = 0;
1808
1809 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1810
1811 if (hp->id >= HCI_MAX_PROTO)
1812 return -EINVAL;
1813
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001814 mutex_lock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
1816 if (!hci_proto[hp->id])
1817 hci_proto[hp->id] = hp;
1818 else
1819 err = -EEXIST;
1820
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001821 mutex_unlock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
1823 return err;
1824}
1825EXPORT_SYMBOL(hci_register_proto);
1826
1827int hci_unregister_proto(struct hci_proto *hp)
1828{
1829 int err = 0;
1830
1831 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1832
1833 if (hp->id >= HCI_MAX_PROTO)
1834 return -EINVAL;
1835
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001836 mutex_lock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837
1838 if (hci_proto[hp->id])
1839 hci_proto[hp->id] = NULL;
1840 else
1841 err = -ENOENT;
1842
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001843 mutex_unlock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
1845 return err;
1846}
1847EXPORT_SYMBOL(hci_unregister_proto);
1848
/* Add a callback set to the global HCI callback list.  The list is
 * protected by hci_cb_list_lock (BH-disabling rwlock).  Always
 * returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1860
/* Remove a callback set from the global HCI callback list, under the
 * same lock as hci_register_cb().  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1872
/* Hand one fully-built packet to the transport driver.  Consumes the
 * skb: it is freed here when no device is attached, otherwise
 * ownership passes to hdev->send(). */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to listening HCI sockets */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1896
1897/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001898int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899{
1900 int len = HCI_COMMAND_HDR_SIZE + plen;
1901 struct hci_command_hdr *hdr;
1902 struct sk_buff *skb;
1903
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001904 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905
1906 skb = bt_skb_alloc(len, GFP_ATOMIC);
1907 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001908 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 return -ENOMEM;
1910 }
1911
1912 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001913 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 hdr->plen = plen;
1915
1916 if (plen)
1917 memcpy(skb_put(skb, plen), param, plen);
1918
1919 BT_DBG("skb len %d", skb->len);
1920
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001921 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001923
Johan Hedberga5040ef2011-01-10 13:28:59 +02001924 if (test_bit(HCI_INIT, &hdev->flags))
1925 hdev->init_last_cmd = opcode;
1926
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001928 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
1930 return 0;
1931}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
1933/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001934void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935{
1936 struct hci_command_hdr *hdr;
1937
1938 if (!hdev->sent_cmd)
1939 return NULL;
1940
1941 hdr = (void *) hdev->sent_cmd->data;
1942
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001943 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 return NULL;
1945
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001946 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947
1948 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1949}
1950
1951/* Send ACL data */
1952static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1953{
1954 struct hci_acl_hdr *hdr;
1955 int len = skb->len;
1956
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001957 skb_push(skb, HCI_ACL_HDR_SIZE);
1958 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001959 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001960 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1961 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962}
1963
/* Queue an ACL skb (and any fragments hanging off its frag_list) on the
 * given channel queue.  Fragments beyond the first are re-labelled as
 * continuation packets and appended atomically under the queue lock. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain from the head skb */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2004
/* Label an outgoing skb as ACL data, add its header, queue it on the
 * channel's data queue and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	/* Also queues any frag_list fragments as continuations */
	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2021
2022/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002023void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024{
2025 struct hci_dev *hdev = conn->hdev;
2026 struct hci_sco_hdr hdr;
2027
2028 BT_DBG("%s len %d", hdev->name, skb->len);
2029
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002030 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 hdr.dlen = skb->len;
2032
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002033 skb_push(skb, HCI_SCO_HDR_SIZE);
2034 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002035 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036
2037 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002038 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002039
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002041 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042}
2043EXPORT_SYMBOL(hci_send_sco);
2044
2045/* ---- HCI TX task (outgoing data) ---- */
2046
/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets, and compute its fair share (*quote) of the
 * available controller buffers.  Returns NULL and *quote = 0 when no
 * eligible connection exists. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy candidate */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop scanning */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer-credit pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL pool when no dedicated
			 * LE buffers were reported (le_mtu == 0) */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Even split between contenders, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2106
Ville Tervobae1f5d92011-02-10 22:38:53 -03002107static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
2109 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002110 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
Ville Tervobae1f5d92011-02-10 22:38:53 -03002112 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002114 rcu_read_lock();
2115
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002117 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002118 if (c->type == type && c->sent) {
2119 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 hdev->name, batostr(&c->dst));
2121 hci_acl_disconn(c, 0x13);
2122 }
2123 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002124
2125 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126}
2127
/* Pick the best channel to service for the given link type: among the
 * channels whose head skb has the highest priority, choose the one on
 * the connection with the fewest packets in flight, and compute its
 * fair share (*quote) of the buffer credits.  Returns NULL when no
 * channel has queued data. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Channels below the current priority band lose */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher band found: restart the fairness count */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within the band, prefer the least-busy conn */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen - stop scanning */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer-credit pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Even split between contenders, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2206
/* Anti-starvation pass after a TX round: for every channel of the given
 * type that was NOT serviced (chan->sent == 0) but has data queued,
 * promote its head skb to just below the maximum priority; channels
 * that were serviced get their per-round counter reset instead. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced this round: reset and skip promotion */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen - stop scanning */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2256
/* Transmit queued ACL data: detect stalled links, then drain channels
 * picked by hci_chan_sent() while buffer credits (acl_cnt) remain,
 * stopping a channel early if its head priority drops.  Finally run the
 * priority recalculation if anything was sent. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the credit count to detect whether we sent anything */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2306
2307/* Schedule SCO */
2308static inline void hci_sched_sco(struct hci_dev *hdev)
2309{
2310 struct hci_conn *conn;
2311 struct sk_buff *skb;
2312 int quote;
2313
2314 BT_DBG("%s", hdev->name);
2315
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002316 if (!hci_conn_num(hdev, SCO_LINK))
2317 return;
2318
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2320 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2321 BT_DBG("skb %p len %d", skb, skb->len);
2322 hci_send_frame(skb);
2323
2324 conn->sent++;
2325 if (conn->sent == ~0)
2326 conn->sent = 0;
2327 }
2328 }
2329}
2330
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002331static inline void hci_sched_esco(struct hci_dev *hdev)
2332{
2333 struct hci_conn *conn;
2334 struct sk_buff *skb;
2335 int quote;
2336
2337 BT_DBG("%s", hdev->name);
2338
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002339 if (!hci_conn_num(hdev, ESCO_LINK))
2340 return;
2341
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002342 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2343 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2344 BT_DBG("skb %p len %d", skb, skb->len);
2345 hci_send_frame(skb);
2346
2347 conn->sent++;
2348 if (conn->sent == ~0)
2349 conn->sent = 0;
2350 }
2351 }
2352}
2353
/* Transmit queued LE data.  LE uses its own credit pool (le_cnt) when
 * the controller reported dedicated LE buffers (le_pkts != 0),
 * otherwise it borrows from the ACL pool; the chosen pool is written
 * back at the end. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Select the credit pool; remember the start value so we can
	 * tell whether anything was sent */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2404
/* TX work: run every per-link-type scheduler under hci_task_lock,
 * then flush any raw (unknown-type) packets straight to the driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	mutex_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	mutex_unlock(&hci_task_lock);
}
2431
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002432/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
2434/* ACL data packet */
2435static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2436{
2437 struct hci_acl_hdr *hdr = (void *) skb->data;
2438 struct hci_conn *conn;
2439 __u16 handle, flags;
2440
2441 skb_pull(skb, HCI_ACL_HDR_SIZE);
2442
2443 handle = __le16_to_cpu(hdr->handle);
2444 flags = hci_flags(handle);
2445 handle = hci_handle(handle);
2446
2447 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2448
2449 hdev->stat.acl_rx++;
2450
2451 hci_dev_lock(hdev);
2452 conn = hci_conn_hash_lookup_handle(hdev, handle);
2453 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002454
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 if (conn) {
2456 register struct hci_proto *hp;
2457
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -07002458 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002459
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002461 hp = hci_proto[HCI_PROTO_L2CAP];
2462 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 hp->recv_acldata(conn, skb, flags);
2464 return;
2465 }
2466 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002467 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 hdev->name, handle);
2469 }
2470
2471 kfree_skb(skb);
2472}
2473
2474/* SCO data packet */
2475static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2476{
2477 struct hci_sco_hdr *hdr = (void *) skb->data;
2478 struct hci_conn *conn;
2479 __u16 handle;
2480
2481 skb_pull(skb, HCI_SCO_HDR_SIZE);
2482
2483 handle = __le16_to_cpu(hdr->handle);
2484
2485 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2486
2487 hdev->stat.sco_rx++;
2488
2489 hci_dev_lock(hdev);
2490 conn = hci_conn_hash_lookup_handle(hdev, handle);
2491 hci_dev_unlock(hdev);
2492
2493 if (conn) {
2494 register struct hci_proto *hp;
2495
2496 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002497 hp = hci_proto[HCI_PROTO_SCO];
2498 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 hp->recv_scodata(conn, skb);
2500 return;
2501 }
2502 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002503 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 hdev->name, handle);
2505 }
2506
2507 kfree_skb(skb);
2508}
2509
/* RX work: drain the receive queue under hci_task_lock, mirroring to
 * promiscuous sockets, dropping data packets while RAW or INIT state
 * applies, and routing the rest by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	mutex_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode user space handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	mutex_unlock(&hci_task_lock);
}
2565
/* Command work: if a command-buffer credit is available, send the next
 * queued command, keeping a clone in hdev->sent_cmd for later matching
 * by hci_sent_cmd_data().  Arms the command timeout timer unless a
 * reset is in flight; on clone failure the command is requeued and the
 * work rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002596
2597int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2598{
2599 /* General inquiry access code (GIAC) */
2600 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2601 struct hci_cp_inquiry cp;
2602
2603 BT_DBG("%s", hdev->name);
2604
2605 if (test_bit(HCI_INQUIRY, &hdev->flags))
2606 return -EINPROGRESS;
2607
2608 memset(&cp, 0, sizeof(cp));
2609 memcpy(&cp.lap, lap, sizeof(cp.lap));
2610 cp.length = length;
2611
2612 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2613}
Andre Guedes023d50492011-11-04 14:16:52 -03002614
2615int hci_cancel_inquiry(struct hci_dev *hdev)
2616{
2617 BT_DBG("%s", hdev->name);
2618
2619 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2620 return -EPERM;
2621
2622 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2623}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002624
/* Runtime-writable module parameter toggling High Speed support */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");