blob: bb089e3bccef1f362f761908777aed53ce57dc6b [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020058int enable_hs;
59
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -020064static DEFINE_MUTEX(hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
66/* HCI device list */
67LIST_HEAD(hci_dev_list);
68DEFINE_RWLOCK(hci_dev_list_lock);
69
70/* HCI callback list */
71LIST_HEAD(hci_cb_list);
72DEFINE_RWLOCK(hci_cb_list_lock);
73
74/* HCI protocols */
75#define HCI_MAX_PROTO 2
76struct hci_proto *hci_proto[HCI_MAX_PROTO];
77
78/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080079static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080
81/* ---- HCI notifications ---- */
82
/* Register @nb on the global HCI event notifier chain so it receives
 * hci_notify() events (HCI_DEV_UP, HCI_DEV_DOWN, ...).
 * Returns 0 on success or a negative errno from the notifier core. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
87
/* Remove @nb from the global HCI event notifier chain.
 * Returns 0 on success or a negative errno from the notifier core. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
92
/* Broadcast @event for @hdev to every registered HCI notifier. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
97
98/* ---- HCI requests ---- */
99
Johan Hedberg23bb5762010-12-21 23:01:27 +0200100void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103
Johan Hedberga5040ef2011-01-10 13:28:59 +0200104 /* If this is the init phase check if the completed command matches
105 * the last init command, and if not just return.
106 */
107 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200108 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = result;
112 hdev->req_status = HCI_REQ_DONE;
113 wake_up_interruptible(&hdev->req_wait_q);
114 }
115}
116
117static void hci_req_cancel(struct hci_dev *hdev, int err)
118{
119 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120
121 if (hdev->req_status == HCI_REQ_PEND) {
122 hdev->req_result = err;
123 hdev->req_status = HCI_REQ_CANCELED;
124 wake_up_interruptible(&hdev->req_wait_q);
125 }
126}
127
/* Execute request and wait for completion.
 *
 * Issues @req on @hdev and sleeps (interruptibly, up to @timeout
 * jiffies) until hci_req_complete()/hci_req_cancel() resolves it.
 * Caller must hold the request lock (see hci_request()).
 * Returns 0 on success, a negative errno on failure, -EINTR on
 * signal, or -ETIMEDOUT when the controller never answered. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	/* Mark the request pending BEFORE issuing it, so the completion
	 * path can observe the transition and wake us. */
	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we bail out without resetting
	 * req_status — presumably a later completion is ignored once the
	 * status is no longer consumed; confirm against callers. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
170
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock so only one synchronous request runs at a time.
 * Fails with -ENETDOWN when the device is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
186
/* Request handler that issues an HCI Reset to the controller.
 * HCI_RESET is set first so the event path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
195
/* Queue the BR/EDR controller initialization command sequence.
 * Runs during hci_init_req() with HCI_INIT set; replies are handled
 * asynchronously by the event path. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Mandatory initialization */

	/* Reset, unless the driver declared the quirk that the
	 * controller must not be reset on init. */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop every link key stored on the controller itself;
	 * key management is done on the host side. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
245
/* Minimal initialization sequence for AMP controllers: only a reset
 * and a local-version read are issued. */
static void amp_init(struct hci_dev *hdev)
{
	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
254
/* Top-level init request: first drain any driver-supplied "special"
 * command skbs into the command queue, then run the type-specific
 * (BR/EDR or AMP) initialization sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open() */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
288
/* LE-specific init request: query the controller's LE buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
296
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
298{
299 __u8 scan = opt;
300
301 BT_DBG("%s %x", hdev->name, scan);
302
303 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200304 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305}
306
307static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
308{
309 __u8 auth = opt;
310
311 BT_DBG("%s %x", hdev->name, auth);
312
313 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200314 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315}
316
317static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
318{
319 __u8 encrypt = opt;
320
321 BT_DBG("%s %x", hdev->name, encrypt);
322
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200323 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200324 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325}
326
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200327static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
328{
329 __le16 policy = cpu_to_le16(opt);
330
Marcel Holtmanna418b892008-11-30 12:17:28 +0100331 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200332
333 /* Default link policy */
334 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
335}
336
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900337/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 * Device is held on return. */
339struct hci_dev *hci_dev_get(int index)
340{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200341 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342
343 BT_DBG("%d", index);
344
345 if (index < 0)
346 return NULL;
347
348 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200349 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 if (d->id == index) {
351 hdev = hci_dev_hold(d);
352 break;
353 }
354 }
355 read_unlock(&hci_dev_list_lock);
356 return hdev;
357}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700358
359/* ---- Inquiry support ---- */
360static void inquiry_cache_flush(struct hci_dev *hdev)
361{
362 struct inquiry_cache *cache = &hdev->inq_cache;
363 struct inquiry_entry *next = cache->list, *e;
364
365 BT_DBG("cache %p", cache);
366
367 cache->list = NULL;
368 while ((e = next)) {
369 next = e->next;
370 kfree(e);
371 }
372}
373
374struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
375{
376 struct inquiry_cache *cache = &hdev->inq_cache;
377 struct inquiry_entry *e;
378
379 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
380
381 for (e = cache->list; e; e = e->next)
382 if (!bacmp(&e->data.bdaddr, bdaddr))
383 break;
384 return e;
385}
386
387void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
388{
389 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200390 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391
392 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
393
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200394 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
395 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200397 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
398 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200400
401 ie->next = cache->list;
402 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403 }
404
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200405 memcpy(&ie->data, data, sizeof(*data));
406 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 cache->timestamp = jiffies;
408}
409
410static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
411{
412 struct inquiry_cache *cache = &hdev->inq_cache;
413 struct inquiry_info *info = (struct inquiry_info *) buf;
414 struct inquiry_entry *e;
415 int copied = 0;
416
417 for (e = cache->list; e && copied < num; e = e->next, copied++) {
418 struct inquiry_data *data = &e->data;
419 bacpy(&info->bdaddr, &data->bdaddr);
420 info->pscan_rep_mode = data->pscan_rep_mode;
421 info->pscan_period_mode = data->pscan_period_mode;
422 info->pscan_mode = data->pscan_mode;
423 memcpy(info->dev_class, data->dev_class, 3);
424 info->clock_offset = data->clock_offset;
425 info++;
426 }
427
428 BT_DBG("cache %p, copied %d", cache, copied);
429 return copied;
430}
431
432static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
433{
434 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
435 struct hci_cp_inquiry cp;
436
437 BT_DBG("%s", hdev->name);
438
439 if (test_bit(HCI_INQUIRY, &hdev->flags))
440 return;
441
442 /* Start Inquiry */
443 memcpy(&cp.lap, &ir->lap, 3);
444 cp.length = ir->length;
445 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200446 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447}
448
/* HCIINQUIRY ioctl: optionally run a fresh inquiry, then copy the
 * cached results back to user space after the request structure.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A fresh inquiry is needed when the cache is stale or empty,
	 * or when the caller explicitly requested a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in inquiry-length units; 2000 ms per unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header first, then the
	 * inquiry_info array immediately after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
514
515/* ---- HCI ioctl helpers ---- */
516
/* Bring up the HCI device with index @dev: open the transport, run the
 * controller init sequence (unless the device is raw), and announce
 * HCI_DEV_UP. On init failure all queues and works are torn down again.
 * Returns 0 on success or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill blocks the radio. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* Allow one outstanding command and run the init
		 * request(s) with HCI_INIT set. */
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Success: hold a ref for the UP state and notify. */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
603
/* Tear down an HCI device: cancel pending work, flush caches and
 * connections, reset the controller, drain all queues and close the
 * transport. Always returns 0. The ordering of the steps below is
 * deliberate — do not reorder. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device went up. */
	hci_dev_put(hdev);
	return 0;
}
683
684int hci_dev_close(__u16 dev)
685{
686 struct hci_dev *hdev;
687 int err;
688
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200689 hdev = hci_dev_get(dev);
690 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 return -ENODEV;
692 err = hci_dev_do_close(hdev);
693 hci_dev_put(hdev);
694 return err;
695}
696
/* HCIDEVRESET ioctl: flush the device's queues, caches and connections
 * and issue a controller reset (unless the device is raw).
 * A no-op (returning 0) when the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Re-arm the command window and clear all flow-control counts. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
735
736int hci_dev_reset_stat(__u16 dev)
737{
738 struct hci_dev *hdev;
739 int ret = 0;
740
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200741 hdev = hci_dev_get(dev);
742 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743 return -ENODEV;
744
745 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
746
747 hci_dev_put(hdev);
748
749 return ret;
750}
751
/* Dispatch the HCISET* device-configuration ioctls.
 * @cmd is the ioctl number, @arg a user pointer to struct hci_dev_req.
 * Returns 0 on success or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two __u16 values: index 1 is the MTU, index 0
	 * the packet count (layout follows the historical ABI). */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
826
/* HCIGETDEVLIST ioctl: copy the list of registered HCI devices (id and
 * flags per device) to user space. The first __u16 at @arg carries the
 * maximum number of entries the caller's buffer can hold.
 * Returns 0 on success or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device counts as activity: abort a pending
		 * auto power-off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed via mgmt default to pairable. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
873
/* Handle the HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot for the
 * device identified by di.dev_id and copy it back to user space.
 * Returns -ENODEV if no such device is registered.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* hci_dev_get() takes a reference; released at the end */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: abort auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not controlled through mgmt stay pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type (high nibble) */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
915
916/* ---- Interface to HCI drivers ---- */
917
/* rfkill callback: called when the user-visible kill switch changes.
 * Only the "blocked" transition is acted on (the device is shut down);
 * unblocking does not power the device back up automatically.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}
931
/* rfkill integration: only set_block is implemented, see above */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
935
/* Alloc HCI device */
/* Allocate and minimally initialize a new hci_dev for a driver.
 * Full initialization happens in hci_register_dev(); returns NULL on
 * allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* kzalloc so all flags, lists and stats start out zeroed */
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
951
/* Free HCI device */
/* Counterpart to hci_alloc_dev(): drops the driver's reference.  The
 * memory itself is released by the device release callback once the
 * last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
961
/* Work item: bring a newly registered device up.  If nobody claims it
 * (HCI_AUTO_OFF still set), schedule an automatic power-off.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are turned back off unless something
	 * cancels the delayed work in the meantime */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup: announce to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
978
/* Delayed work item: power the device off after the auto-off timeout */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* No longer an automatic power-off candidate */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
990
/* Delayed work item: end a time-limited discoverable period by writing
 * scan enable back to page-scan only (SCAN_PAGE).
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	/* Timeout consumed; 0 means "not currently limited-discoverable" */
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1008
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001009int hci_uuids_clear(struct hci_dev *hdev)
1010{
1011 struct list_head *p, *n;
1012
1013 list_for_each_safe(p, n, &hdev->uuids) {
1014 struct bt_uuid *uuid;
1015
1016 uuid = list_entry(p, struct bt_uuid, list);
1017
1018 list_del(p);
1019 kfree(uuid);
1020 }
1021
1022 return 0;
1023}
1024
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001025int hci_link_keys_clear(struct hci_dev *hdev)
1026{
1027 struct list_head *p, *n;
1028
1029 list_for_each_safe(p, n, &hdev->link_keys) {
1030 struct link_key *key;
1031
1032 key = list_entry(p, struct link_key, list);
1033
1034 list_del(p);
1035 kfree(key);
1036 }
1037
1038 return 0;
1039}
1040
1041struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1042{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001043 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001044
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001045 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001046 if (bacmp(bdaddr, &k->bdaddr) == 0)
1047 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001048
1049 return NULL;
1050}
1051
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001052static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1053 u8 key_type, u8 old_key_type)
1054{
1055 /* Legacy key */
1056 if (key_type < 0x03)
1057 return 1;
1058
1059 /* Debug keys are insecure so don't store them persistently */
1060 if (key_type == HCI_LK_DEBUG_COMBINATION)
1061 return 0;
1062
1063 /* Changed combination key and there's no previous one */
1064 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1065 return 0;
1066
1067 /* Security mode 3 case */
1068 if (!conn)
1069 return 1;
1070
1071 /* Neither local nor remote side had no-bonding as requirement */
1072 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1073 return 1;
1074
1075 /* Local side had dedicated bonding as requirement */
1076 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1077 return 1;
1078
1079 /* Remote side had dedicated bonding as requirement */
1080 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1081 return 1;
1082
1083 /* If none of the above criteria match, then don't store the key
1084 * persistently */
1085 return 0;
1086}
1087
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001088struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1089{
1090 struct link_key *k;
1091
1092 list_for_each_entry(k, &hdev->link_keys, list) {
1093 struct key_master_id *id;
1094
1095 if (k->type != HCI_LK_SMP_LTK)
1096 continue;
1097
1098 if (k->dlen != sizeof(*id))
1099 continue;
1100
1101 id = (void *) &k->data;
1102 if (id->ediv == ediv &&
1103 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1104 return k;
1105 }
1106
1107 return NULL;
1108}
1109EXPORT_SYMBOL(hci_find_ltk);
1110
1111struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1112 bdaddr_t *bdaddr, u8 type)
1113{
1114 struct link_key *k;
1115
1116 list_for_each_entry(k, &hdev->link_keys, list)
1117 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1118 return k;
1119
1120 return NULL;
1121}
1122EXPORT_SYMBOL(hci_find_link_key_type);
1123
/* Store (or update) the link key for a remote device and, for new keys,
 * notify the mgmt interface.  Non-persistent keys are reported but then
 * removed again from the list.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff = "no previous key" sentinel */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Temporary keys were only needed for the mgmt notification */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1178
/* Store (or update) an LE Long Term Key for a remote device.  The
 * EDIV/Rand pair is kept in the entry's variable-length data area.
 * Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate with room for the trailing key_master_id */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;	/* "no previous key" sentinel */
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): old_key_type is passed where mgmt_new_link_key()
	 * takes its "persistent" argument; hci_add_link_key() computes a
	 * real persistence flag instead.  Looks suspicious — confirm
	 * against the mgmt_new_link_key() contract before changing. */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1216
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001217int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1218{
1219 struct link_key *key;
1220
1221 key = hci_find_link_key(hdev, bdaddr);
1222 if (!key)
1223 return -ENOENT;
1224
1225 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1226
1227 list_del(&key->list);
1228 kfree(key);
1229
1230 return 0;
1231}
1232
/* HCI command timer function */
/* Fires when the controller fails to ack a command in time.  Resets the
 * command credit to 1 so the command queue can make progress again.
 * Runs in timer (softirq) context.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1242
Szymon Janc2763eda2011-03-22 13:12:22 +01001243struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1244 bdaddr_t *bdaddr)
1245{
1246 struct oob_data *data;
1247
1248 list_for_each_entry(data, &hdev->remote_oob_data, list)
1249 if (bacmp(bdaddr, &data->bdaddr) == 0)
1250 return data;
1251
1252 return NULL;
1253}
1254
1255int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1256{
1257 struct oob_data *data;
1258
1259 data = hci_find_remote_oob_data(hdev, bdaddr);
1260 if (!data)
1261 return -ENOENT;
1262
1263 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1264
1265 list_del(&data->list);
1266 kfree(data);
1267
1268 return 0;
1269}
1270
1271int hci_remote_oob_data_clear(struct hci_dev *hdev)
1272{
1273 struct oob_data *data, *n;
1274
1275 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1276 list_del(&data->list);
1277 kfree(data);
1278 }
1279
1280 return 0;
1281}
1282
1283int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1284 u8 *randomizer)
1285{
1286 struct oob_data *data;
1287
1288 data = hci_find_remote_oob_data(hdev, bdaddr);
1289
1290 if (!data) {
1291 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1292 if (!data)
1293 return -ENOMEM;
1294
1295 bacpy(&data->bdaddr, bdaddr);
1296 list_add(&data->list, &hdev->remote_oob_data);
1297 }
1298
1299 memcpy(data->hash, hash, sizeof(data->hash));
1300 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1301
1302 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1303
1304 return 0;
1305}
1306
Antti Julkub2a66aa2011-06-15 12:01:14 +03001307struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1308 bdaddr_t *bdaddr)
1309{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001310 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001311
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001312 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001313 if (bacmp(bdaddr, &b->bdaddr) == 0)
1314 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001315
1316 return NULL;
1317}
1318
1319int hci_blacklist_clear(struct hci_dev *hdev)
1320{
1321 struct list_head *p, *n;
1322
1323 list_for_each_safe(p, n, &hdev->blacklist) {
1324 struct bdaddr_list *b;
1325
1326 b = list_entry(p, struct bdaddr_list, list);
1327
1328 list_del(p);
1329 kfree(b);
1330 }
1331
1332 return 0;
1333}
1334
1335int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336{
1337 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001338
1339 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1340 return -EBADF;
1341
Antti Julku5e762442011-08-25 16:48:02 +03001342 if (hci_blacklist_lookup(hdev, bdaddr))
1343 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001344
1345 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001346 if (!entry)
1347 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001348
1349 bacpy(&entry->bdaddr, bdaddr);
1350
1351 list_add(&entry->list, &hdev->blacklist);
1352
Johan Hedberg744cf192011-11-08 20:40:14 +02001353 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001354}
1355
1356int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1357{
1358 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001359
Szymon Janc1ec918c2011-11-16 09:32:21 +01001360 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001361 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001362
1363 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001364 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001365 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001366
1367 list_del(&entry->list);
1368 kfree(entry);
1369
Johan Hedberg744cf192011-11-08 20:40:14 +02001370 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001371}
1372
/* Delayed work item: expire the LE advertising cache under the device
 * lock.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1384
Andre Guedes76c86862011-05-26 16:23:50 -03001385int hci_adv_entries_clear(struct hci_dev *hdev)
1386{
1387 struct adv_entry *entry, *tmp;
1388
1389 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1390 list_del(&entry->list);
1391 kfree(entry);
1392 }
1393
1394 BT_DBG("%s adv cache cleared", hdev->name);
1395
1396 return 0;
1397}
1398
1399struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1400{
1401 struct adv_entry *entry;
1402
1403 list_for_each_entry(entry, &hdev->adv_entries, list)
1404 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1405 return entry;
1406
1407 return NULL;
1408}
1409
1410static inline int is_connectable_adv(u8 evt_type)
1411{
1412 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1413 return 1;
1414
1415 return 0;
1416}
1417
/* Cache a device seen in a connectable LE advertising report.
 * Non-connectable report types are rejected with -EINVAL; duplicate
 * addresses are silently accepted (return 0) without a new entry.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
			batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1445
/* Register HCI device */
/* Register a driver-allocated hci_dev with the core: assigns the first
 * free index, initializes all state (queues, work items, lists, timer),
 * creates the per-device workqueue, sysfs entries and rfkill hook, and
 * schedules the initial power-on.  Returns the assigned id on success
 * or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must supply the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Keep the list sorted by id: insert after the last taken slot */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Ordered, high-priority single-threaded queue for this device */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in setup mode and auto-power-off unless
	 * claimed; hci_power_on runs asynchronously */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1573
1574/* Unregister HCI device */
/* Unregister HCI device */
/* Tear down a registered device: unlink it from the global list, close
 * it, flush pending work, remove sysfs/rfkill, free all cached state
 * and drop the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Unlink first so no new lookups can find the device */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all per-device caches under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1625
/* Suspend HCI device */
/* Notifies registered listeners; no device state is changed here. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1633
/* Resume HCI device */
/* Notifies registered listeners; no device state is changed here. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1641
/* Receive frame from HCI drivers */
/* Entry point for drivers handing a complete packet to the core.  The
 * skb's dev field must point at the owning hci_dev.  Frames are dropped
 * with -ENXIO unless the device is up or initializing; otherwise they
 * are timestamped and queued for the rx work item.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1664
/* Incrementally reassemble one HCI packet from a driver byte stream.
 *
 * @type:  packet type (ACL, SCO or event); anything else is -EILSEQ
 * @data:  next chunk of raw bytes
 * @count: number of bytes available in @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * State is kept in the slot's skb control block (expected remaining
 * bytes).  When a full packet is assembled it is handed to
 * hci_recv_frame() and the slot is cleared.  Returns the number of
 * unconsumed input bytes, or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the largest frame this
		 * type can carry and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header complete?  Learn the payload length from it and
		 * verify it fits in the allocated skb */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1773
Marcel Holtmannef222012007-07-11 06:42:04 +02001774int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1775{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301776 int rem = 0;
1777
Marcel Holtmannef222012007-07-11 06:42:04 +02001778 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1779 return -EILSEQ;
1780
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001781 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001782 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301783 if (rem < 0)
1784 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001785
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301786 data += (count - rem);
1787 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001788 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001789
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301790 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001791}
1792EXPORT_SYMBOL(hci_recv_fragment);
1793
Suraj Sumangala99811512010-07-14 13:02:19 +05301794#define STREAM_REASSEMBLY 0
1795
1796int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1797{
1798 int type;
1799 int rem = 0;
1800
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001801 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301802 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1803
1804 if (!skb) {
1805 struct { char type; } *pkt;
1806
1807 /* Start of the frame */
1808 pkt = data;
1809 type = pkt->type;
1810
1811 data++;
1812 count--;
1813 } else
1814 type = bt_cb(skb)->pkt_type;
1815
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001816 rem = hci_reassembly(hdev, type, data, count,
1817 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301818 if (rem < 0)
1819 return rem;
1820
1821 data += (count - rem);
1822 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001823 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301824
1825 return rem;
1826}
1827EXPORT_SYMBOL(hci_recv_stream_fragment);
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829/* ---- Interface to upper protocols ---- */
1830
1831/* Register/Unregister protocols.
1832 * hci_task_lock is used to ensure that no tasks are running. */
1833int hci_register_proto(struct hci_proto *hp)
1834{
1835 int err = 0;
1836
1837 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1838
1839 if (hp->id >= HCI_MAX_PROTO)
1840 return -EINVAL;
1841
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001842 mutex_lock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843
1844 if (!hci_proto[hp->id])
1845 hci_proto[hp->id] = hp;
1846 else
1847 err = -EEXIST;
1848
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001849 mutex_unlock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850
1851 return err;
1852}
1853EXPORT_SYMBOL(hci_register_proto);
1854
1855int hci_unregister_proto(struct hci_proto *hp)
1856{
1857 int err = 0;
1858
1859 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1860
1861 if (hp->id >= HCI_MAX_PROTO)
1862 return -EINVAL;
1863
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001864 mutex_lock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
1866 if (hci_proto[hp->id])
1867 hci_proto[hp->id] = NULL;
1868 else
1869 err = -ENOENT;
1870
Gustavo F. Padovan67d0dfb2011-12-09 04:41:30 -02001871 mutex_unlock(&hci_task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872
1873 return err;
1874}
1875EXPORT_SYMBOL(hci_unregister_proto);
1876
/* Add an upper-layer callback structure to the global hci_cb_list.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Writers take the list lock with bottom halves disabled */
	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1888
/* Remove a previously registered callback structure from hci_cb_list.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Writers take the list lock with bottom halves disabled */
	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1900
/* Hand one frame to the transport driver.  The skb's dev field carries
 * the target hci_dev; a copy goes to monitoring sockets first when the
 * device is in promiscuous mode.  Consumes the skb (ownership passes
 * to the driver's send callback); returns its status or -ENODEV. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp the frame, then mirror it to listening sockets */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1924
1925/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001926int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927{
1928 int len = HCI_COMMAND_HDR_SIZE + plen;
1929 struct hci_command_hdr *hdr;
1930 struct sk_buff *skb;
1931
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001932 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
1934 skb = bt_skb_alloc(len, GFP_ATOMIC);
1935 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001936 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 return -ENOMEM;
1938 }
1939
1940 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001941 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 hdr->plen = plen;
1943
1944 if (plen)
1945 memcpy(skb_put(skb, plen), param, plen);
1946
1947 BT_DBG("skb len %d", skb->len);
1948
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001949 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001951
Johan Hedberga5040ef2011-01-10 13:28:59 +02001952 if (test_bit(HCI_INIT, &hdev->flags))
1953 hdev->init_last_cmd = opcode;
1954
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001956 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
1958 return 0;
1959}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001962void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963{
1964 struct hci_command_hdr *hdr;
1965
1966 if (!hdev->sent_cmd)
1967 return NULL;
1968
1969 hdr = (void *) hdev->sent_cmd->data;
1970
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001971 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 return NULL;
1973
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001974 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
1976 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1977}
1978
1979/* Send ACL data */
1980static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1981{
1982 struct hci_acl_hdr *hdr;
1983 int len = skb->len;
1984
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001985 skb_push(skb, HCI_ACL_HDR_SIZE);
1986 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001987 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001988 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1989 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990}
1991
/* Queue a (possibly fragmented) ACL frame on @queue.  The head skb
 * already carries its ACL header; any frag_list members are detached,
 * stamped as continuation fragments (ACL_CONT) with their own headers,
 * and queued atomically under the queue lock so the fragments of one
 * frame are never interleaved with other traffic. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain from the head skb */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2032
2033void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2034{
2035 struct hci_conn *conn = chan->conn;
2036 struct hci_dev *hdev = conn->hdev;
2037
2038 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2039
2040 skb->dev = (void *) hdev;
2041 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2042 hci_add_acl_hdr(skb, conn->handle, flags);
2043
2044 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002046 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047}
2048EXPORT_SYMBOL(hci_send_acl);
2049
2050/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002051void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
2053 struct hci_dev *hdev = conn->hdev;
2054 struct hci_sco_hdr hdr;
2055
2056 BT_DBG("%s len %d", hdev->name, skb->len);
2057
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002058 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 hdr.dlen = skb->len;
2060
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002061 skb_push(skb, HCI_SCO_HDR_SIZE);
2062 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002063 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
2065 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002066 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002067
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002069 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070}
2071EXPORT_SYMBOL(hci_send_sco);
2072
2073/* ---- HCI TX task (outgoing data) ---- */
2074
2075/* HCI Connection scheduler */
/* Connection scheduler: among connections of @type in BT_CONNECTED or
 * BT_CONFIG state with queued data, pick the one with the fewest
 * packets in flight (c->sent), and compute its fair share of the
 * controller's free buffers in *quote (at least 1 when a connection
 * is found, 0 otherwise). */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that feeds this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE falls back to ACL buffers when it has none */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Divide free buffers evenly; guarantee at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2134
/* TX timeout handling: the controller stopped returning buffer credits
 * for links of @type, so disconnect every connection of that type that
 * still has unacknowledged packets (disconnect reason code 0x13). */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2155
/* Channel scheduler: walk every channel of every connection of @type
 * that is in BT_CONNECTED/BT_CONFIG state and pick the channel whose
 * head skb has the highest priority, breaking ties in favour of the
 * connection with the fewest packets in flight.  On success *quote is
 * set to the channel's share of the free controller buffers (>= 1). */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the queue head's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the tie-break */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the least-busy
			 * connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that feeds this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE falls back to ACL buffers when it has none */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Divide free buffers among contenders; guarantee at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2234
/* Priority aging, run after a TX round that sent something: reset the
 * per-channel sent counter of channels that transmitted, and promote
 * the head skb of channels that sent nothing but still have queued
 * data to HCI_PRIO_MAX - 1, so low-priority channels cannot be starved
 * indefinitely by higher-priority traffic. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to transmit: just clear its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Starved channel with pending data: promote it,
			 * unless it is already at the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2284
/* Drain queued ACL data: repeatedly pick the best channel via
 * hci_chan_sent() and send up to its quota, stopping a round early if
 * the head frame's priority drops below the round's starting priority.
 * Also detects a stalled ACL link when no controller buffer credits
 * have come back for 45 seconds. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Snapshot the credit count to detect whether anything was sent */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something went out: age priorities so no channel starves */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2334
2335/* Schedule SCO */
/* Send queued SCO frames, serving the least-busy connection first
 * (hci_low_sent), limited by the controller's free SCO buffers. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2358
/* Send queued eSCO frames; identical scheme to hci_sched_sco and the
 * same buffer pool (sco_cnt), but for ESCO_LINK connections. */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2381
/* Drain queued LE data, mirroring hci_sched_acl.  When the controller
 * has no dedicated LE buffers (le_pkts == 0), LE traffic consumes the
 * ACL buffer pool instead, and the updated count is written back to
 * whichever pool was used. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Borrow ACL credits when there is no dedicated LE pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something went out: age priorities so no channel starves */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2432
/* Work handler for hdev->tx_work: run every per-link-type scheduler,
 * then flush raw (unknown type) packets straight to the driver.
 * Serialized against the RX and proto (un)registration paths by
 * hci_task_lock. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	mutex_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	mutex_unlock(&hci_task_lock);
}
2459
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002460/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461
2462/* ACL data packet */
2463static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2464{
2465 struct hci_acl_hdr *hdr = (void *) skb->data;
2466 struct hci_conn *conn;
2467 __u16 handle, flags;
2468
2469 skb_pull(skb, HCI_ACL_HDR_SIZE);
2470
2471 handle = __le16_to_cpu(hdr->handle);
2472 flags = hci_flags(handle);
2473 handle = hci_handle(handle);
2474
2475 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2476
2477 hdev->stat.acl_rx++;
2478
2479 hci_dev_lock(hdev);
2480 conn = hci_conn_hash_lookup_handle(hdev, handle);
2481 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002482
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 if (conn) {
2484 register struct hci_proto *hp;
2485
Mat Martineau65983fc2011-12-13 15:06:02 -08002486 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002487
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002489 hp = hci_proto[HCI_PROTO_L2CAP];
2490 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 hp->recv_acldata(conn, skb, flags);
2492 return;
2493 }
2494 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002495 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 hdev->name, handle);
2497 }
2498
2499 kfree_skb(skb);
2500}
2501
2502/* SCO data packet */
2503static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2504{
2505 struct hci_sco_hdr *hdr = (void *) skb->data;
2506 struct hci_conn *conn;
2507 __u16 handle;
2508
2509 skb_pull(skb, HCI_SCO_HDR_SIZE);
2510
2511 handle = __le16_to_cpu(hdr->handle);
2512
2513 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2514
2515 hdev->stat.sco_rx++;
2516
2517 hci_dev_lock(hdev);
2518 conn = hci_conn_hash_lookup_handle(hdev, handle);
2519 hci_dev_unlock(hdev);
2520
2521 if (conn) {
2522 register struct hci_proto *hp;
2523
2524 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002525 hp = hci_proto[HCI_PROTO_SCO];
2526 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 hp->recv_scodata(conn, skb);
2528 return;
2529 }
2530 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002531 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 hdev->name, handle);
2533 }
2534
2535 kfree_skb(skb);
2536}
2537
/* Work handler for hdev->rx_work: drain the RX queue, mirroring frames
 * to promiscuous sockets, discarding everything in HCI_RAW mode and
 * data packets during HCI_INIT, then dispatching by packet type.
 * Serialized against the TX path by hci_task_lock. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	mutex_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: user space handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states;
			 * events must still get through for init itself. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	mutex_unlock(&hci_task_lock);
}
2593
/* Work handler for hdev->cmd_work: when the controller has command
 * credit (cmd_cnt > 0), send the next queued HCI command, keep a clone
 * in hdev->sent_cmd for hci_sent_cmd_data(), and (re)arm the command
 * timeout timer. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is in flight */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002624
2625int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2626{
2627 /* General inquiry access code (GIAC) */
2628 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2629 struct hci_cp_inquiry cp;
2630
2631 BT_DBG("%s", hdev->name);
2632
2633 if (test_bit(HCI_INQUIRY, &hdev->flags))
2634 return -EINPROGRESS;
2635
2636 memset(&cp, 0, sizeof(cp));
2637 memcpy(&cp.lap, lap, sizeof(cp.lap));
2638 cp.length = length;
2639
2640 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2641}
Andre Guedes023d5042011-11-04 14:16:52 -03002642
2643int hci_cancel_inquiry(struct hci_dev *hdev)
2644{
2645 BT_DBG("%s", hdev->name);
2646
2647 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2648 return -EPERM;
2649
2650 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2651}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002652
/* "enable_hs" module parameter (bool, mode 0644: adjustable via sysfs
 * by root); described below as enabling High Speed support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");