/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

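/* Relay device state changes (up/down, register/unregister) to the
 * HCI socket layer so listening sockets get notified.
 */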
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

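/* Dump each locally supported features page (page 0 up to max_page)
 * as eight hex octets per line.
 */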
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid->uuid);
		data4 = get_unaligned_le16(uuid->uuid + 4);
		data3 = get_unaligned_le16(uuid->uuid + 6);
		data2 = get_unaligned_le16(uuid->uuid + 8);
		data1 = get_unaligned_le16(uuid->uuid + 10);
		data0 = get_unaligned_le32(uuid->uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open = simple_open,
	.read = use_debug_keys_read,
	.llseek = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

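/* Synchronous requests park the caller on hdev->req_wait_q with
 * req_status set to HCI_REQ_PEND; the two helpers below flip the
 * status to DONE or CANCELED and wake the waiter up.
 */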
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

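/* Take the last received event out of hdev->recv_evt and return it if
 * it matches the expected event code (or, when @event is 0, the Command
 * Complete for @opcode); otherwise free it and return -ENODATA.
 */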
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

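/* Send a single HCI command and wait (interruptibly, up to @timeout)
 * for the matching event: @event if non-zero, otherwise the Command
 * Complete for @opcode. Returns the event skb or an ERR_PTR on failure.
 */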
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

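/* Pick the inquiry mode: 0x02 (extended) or 0x01 (with RSSI) when the
 * feature bits advertise it; the manufacturer/revision checks below
 * whitelist a few controllers that handle inquiry with RSSI without
 * advertising it.
 */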
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it is
	 * marked as supported. If not supported, assume that the controller
	 * does not have actual support for stored link keys, which makes
	 * this command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by
		 * default use that one. If this is a LE only
		 * controller without one, default to the random
		 * address.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
		else
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

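/* Controller bring-up: run init stages 1-4 as synchronous requests.
 * Stage 1 (reset and basic info) applies to every controller type;
 * AMP controllers stop there, while BR/EDR/LE controllers continue
 * with stages 2-4 and a one-time set of debugfs entries.
 */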
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

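/* Discovery counts as active while devices are still being found or
 * remote names are still being resolved.
 */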
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

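/* Re-insert @ie into the resolve list so it stays ordered by signal
 * strength (smallest |RSSI| first); entries whose name request is
 * already pending keep their position at the front.
 */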
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

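/* Add or refresh a cache entry for an inquiry result. Returns true if
 * the remote name is already known (so the result can be reported
 * without a remote name request), false otherwise.
 */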
Johan Hedberg31754052012-01-04 13:39:52 +02001410bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001411 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412{
Johan Hedberg30883512012-01-04 14:16:21 +02001413 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001414 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001416 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
Szymon Janc2b2fec42012-11-20 11:38:54 +01001418 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1419
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001420 if (ssp)
1421 *ssp = data->ssp_mode;
1422
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001423 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001424 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001425 if (ie->data.ssp_mode && ssp)
1426 *ssp = true;
1427
Johan Hedberga3d4e202012-01-09 00:53:02 +02001428 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001429 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001430 ie->data.rssi = data->rssi;
1431 hci_inquiry_cache_update_resolve(hdev, ie);
1432 }
1433
Johan Hedberg561aafb2012-01-04 13:31:59 +02001434 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001435 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001436
Johan Hedberg561aafb2012-01-04 13:31:59 +02001437	/* Entry not in the cache. Add a new one. */
1438 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1439 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001440 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001441
1442 list_add(&ie->all, &cache->all);
1443
1444 if (name_known) {
1445 ie->name_state = NAME_KNOWN;
1446 } else {
1447 ie->name_state = NAME_NOT_KNOWN;
1448 list_add(&ie->list, &cache->unknown);
1449 }
1450
1451update:
1452 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001453 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001454 ie->name_state = NAME_KNOWN;
1455 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 }
1457
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001458 memcpy(&ie->data, data, sizeof(*data));
1459 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001461
1462 if (ie->name_state == NAME_NOT_KNOWN)
1463 return false;
1464
1465 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466}
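
/* A hedged walk-through of the state handling above: a device first seen
 * without its name is added with NAME_NOT_KNOWN and linked on
 * cache->unknown; the event handling code moves it to NAME_PENDING on
 * cache->resolve while a remote name request is in flight; and a later
 * result that carries the name flips it to NAME_KNOWN and unlinks it
 * from either list. The return value tells the caller whether the name
 * is already known, i.e. false while the entry sits in NAME_NOT_KNOWN
 * and name resolution is still needed.
 */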
1467
1468static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1469{
Johan Hedberg30883512012-01-04 14:16:21 +02001470 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 struct inquiry_info *info = (struct inquiry_info *) buf;
1472 struct inquiry_entry *e;
1473 int copied = 0;
1474
Johan Hedberg561aafb2012-01-04 13:31:59 +02001475 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001477
1478 if (copied >= num)
1479 break;
1480
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 bacpy(&info->bdaddr, &data->bdaddr);
1482 info->pscan_rep_mode = data->pscan_rep_mode;
1483 info->pscan_period_mode = data->pscan_period_mode;
1484 info->pscan_mode = data->pscan_mode;
1485 memcpy(info->dev_class, data->dev_class, 3);
1486 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001487
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001489 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 }
1491
1492 BT_DBG("cache %p, copied %d", cache, copied);
1493 return copied;
1494}
1495
Johan Hedberg42c6b122013-03-05 20:37:49 +02001496static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497{
1498 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001499 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 struct hci_cp_inquiry cp;
1501
1502 BT_DBG("%s", hdev->name);
1503
1504 if (test_bit(HCI_INQUIRY, &hdev->flags))
1505 return;
1506
1507 /* Start Inquiry */
1508 memcpy(&cp.lap, &ir->lap, 3);
1509 cp.length = ir->length;
1510 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001511 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512}
1513
Andre Guedes3e13fa12013-03-27 20:04:56 -03001514static int wait_inquiry(void *word)
1515{
1516 schedule();
1517 return signal_pending(current);
1518}
1519
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520int hci_inquiry(void __user *arg)
1521{
1522 __u8 __user *ptr = arg;
1523 struct hci_inquiry_req ir;
1524 struct hci_dev *hdev;
1525 int err = 0, do_inquiry = 0, max_rsp;
1526 long timeo;
1527 __u8 *buf;
1528
1529 if (copy_from_user(&ir, ptr, sizeof(ir)))
1530 return -EFAULT;
1531
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001532 hdev = hci_dev_get(ir.dev_id);
1533 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 return -ENODEV;
1535
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001536 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1537 err = -EBUSY;
1538 goto done;
1539 }
1540
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001541 if (hdev->dev_type != HCI_BREDR) {
1542 err = -EOPNOTSUPP;
1543 goto done;
1544 }
1545
Johan Hedberg56f87902013-10-02 13:43:13 +03001546 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1547 err = -EOPNOTSUPP;
1548 goto done;
1549 }
1550
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001551 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001552 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001553 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001554 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 do_inquiry = 1;
1556 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001557 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
Marcel Holtmann04837f62006-07-03 10:02:33 +02001559 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001560
1561 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001562 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1563 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001564 if (err < 0)
1565 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001566
1567 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1568 * cleared). If it is interrupted by a signal, return -EINTR.
1569 */
1570 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1571 TASK_INTERRUPTIBLE))
1572 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001575	/* For an unlimited number of responses, use a buffer with
1576	 * 255 entries.
1577	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1579
1580	/* cache_dump can't sleep, so we allocate a temporary buffer and then
1581	 * copy it to user space.
1582	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001583 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001584 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 err = -ENOMEM;
1586 goto done;
1587 }
1588
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001589 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001591 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592
1593 BT_DBG("num_rsp %d", ir.num_rsp);
1594
1595 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1596 ptr += sizeof(ir);
1597 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001598 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001600 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 err = -EFAULT;
1602
1603 kfree(buf);
1604
1605done:
1606 hci_dev_put(hdev);
1607 return err;
1608}
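
/* A hedged userspace sketch of driving this ioctl, modeled on the BlueZ
 * hci_inquiry() helper; it assumes the BlueZ headers and libbluetooth
 * for ba2str(), and trims error handling. On success the kernel writes
 * the updated request header back, followed by the inquiry_info array.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *	#include <bluetooth/hci_lib.h>
 *
 *	int main(void)
 *	{
 *		struct hci_inquiry_req *ir;
 *		inquiry_info *info;
 *		char addr[18];
 *		int dd, i;
 *
 *		// request header plus room for up to 255 responses
 *		ir = calloc(1, sizeof(*ir) + 255 * sizeof(*info));
 *		ir->dev_id  = 0;			// hci0
 *		ir->num_rsp = 255;
 *		ir->length  = 8;			// 8 * 1.28s inquiry window
 *		ir->flags   = IREQ_CACHE_FLUSH;		// force a fresh inquiry
 *		ir->lap[0] = 0x33;			// GIAC 0x9e8b33
 *		ir->lap[1] = 0x8b;
 *		ir->lap[2] = 0x9e;
 *
 *		dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		if (dd < 0 || ioctl(dd, HCIINQUIRY, ir) < 0)
 *			return 1;
 *
 *		info = (inquiry_info *)(ir + 1);
 *		for (i = 0; i < ir->num_rsp; i++) {
 *			ba2str(&info[i].bdaddr, addr);
 *			printf("%s\n", addr);
 *		}
 *		return 0;
 *	}
 */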
1609
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001610static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 int ret = 0;
1613
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 BT_DBG("%s %p", hdev->name, hdev);
1615
1616 hci_req_lock(hdev);
1617
Johan Hovold94324962012-03-15 14:48:41 +01001618 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1619 ret = -ENODEV;
1620 goto done;
1621 }
1622
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001623 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1624 /* Check for rfkill but allow the HCI setup stage to
1625 * proceed (which in itself doesn't cause any RF activity).
1626 */
1627 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1628 ret = -ERFKILL;
1629 goto done;
1630 }
1631
1632 /* Check for valid public address or a configured static
1633	 * random address, but let the HCI setup proceed to
1634 * be able to determine if there is a public address
1635 * or not.
1636 *
1637 * This check is only valid for BR/EDR controllers
1638 * since AMP controllers do not have an address.
1639 */
1640 if (hdev->dev_type == HCI_BREDR &&
1641 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1642 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1643 ret = -EADDRNOTAVAIL;
1644 goto done;
1645 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001646 }
1647
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 if (test_bit(HCI_UP, &hdev->flags)) {
1649 ret = -EALREADY;
1650 goto done;
1651 }
1652
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 if (hdev->open(hdev)) {
1654 ret = -EIO;
1655 goto done;
1656 }
1657
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001658 atomic_set(&hdev->cmd_cnt, 1);
1659 set_bit(HCI_INIT, &hdev->flags);
1660
1661 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1662 ret = hdev->setup(hdev);
1663
1664 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001665 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1666 set_bit(HCI_RAW, &hdev->flags);
1667
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001668 if (!test_bit(HCI_RAW, &hdev->flags) &&
1669 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001670 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 }
1672
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001673 clear_bit(HCI_INIT, &hdev->flags);
1674
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 if (!ret) {
1676 hci_dev_hold(hdev);
1677 set_bit(HCI_UP, &hdev->flags);
1678 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001679 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001680 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001681 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001682 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001683 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001684 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001685 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001686 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001688 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001689 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001690 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
1692 skb_queue_purge(&hdev->cmd_q);
1693 skb_queue_purge(&hdev->rx_q);
1694
1695 if (hdev->flush)
1696 hdev->flush(hdev);
1697
1698 if (hdev->sent_cmd) {
1699 kfree_skb(hdev->sent_cmd);
1700 hdev->sent_cmd = NULL;
1701 }
1702
1703 hdev->close(hdev);
1704 hdev->flags = 0;
1705 }
1706
1707done:
1708 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 return ret;
1710}
1711
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001712/* ---- HCI ioctl helpers ---- */
1713
1714int hci_dev_open(__u16 dev)
1715{
1716 struct hci_dev *hdev;
1717 int err;
1718
1719 hdev = hci_dev_get(dev);
1720 if (!hdev)
1721 return -ENODEV;
1722
Johan Hedberge1d08f42013-10-01 22:44:50 +03001723 /* We need to ensure that no other power on/off work is pending
1724 * before proceeding to call hci_dev_do_open. This is
1725 * particularly important if the setup procedure has not yet
1726 * completed.
1727 */
1728 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1729 cancel_delayed_work(&hdev->power_off);
1730
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001731 /* After this call it is guaranteed that the setup procedure
1732 * has finished. This means that error conditions like RFKILL
1733 * or no valid public or static random address apply.
1734 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001735 flush_workqueue(hdev->req_workqueue);
1736
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001737 err = hci_dev_do_open(hdev);
1738
1739 hci_dev_put(hdev);
1740
1741 return err;
1742}
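
/* A hedged userspace sketch of this path: it is the ioctl hciconfig
 * issues for "hci0 up" (HCIDEVDOWN, handled by hci_dev_close() below,
 * is its mirror image). Device id 0 is illustrative.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int main(void)
 *	{
 *		int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *		if (ctl < 0)
 *			return 1;
 *		// bring hci0 up; EALREADY means it already was
 *		if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *			perror("HCIDEVUP");
 *		return 0;
 *	}
 */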
1743
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744static int hci_dev_do_close(struct hci_dev *hdev)
1745{
1746 BT_DBG("%s %p", hdev->name, hdev);
1747
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001748 cancel_delayed_work(&hdev->power_off);
1749
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 hci_req_cancel(hdev, ENODEV);
1751 hci_req_lock(hdev);
1752
1753 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001754 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 hci_req_unlock(hdev);
1756 return 0;
1757 }
1758
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001759	/* Flush pending RX and TX work */
1760 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001761 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001763 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001764 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001765 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001766 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001767 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001768 }
1769
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001770 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001771 cancel_delayed_work(&hdev->service_cache);
1772
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001773 cancel_delayed_work_sync(&hdev->le_scan_disable);
1774
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001775 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001776 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001778 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
1780 hci_notify(hdev, HCI_DEV_DOWN);
1781
1782 if (hdev->flush)
1783 hdev->flush(hdev);
1784
1785 /* Reset device */
1786 skb_queue_purge(&hdev->cmd_q);
1787 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001788 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001789 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001790 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001792 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 clear_bit(HCI_INIT, &hdev->flags);
1794 }
1795
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001796 /* flush cmd work */
1797 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
1799 /* Drop queues */
1800 skb_queue_purge(&hdev->rx_q);
1801 skb_queue_purge(&hdev->cmd_q);
1802 skb_queue_purge(&hdev->raw_q);
1803
1804 /* Drop last sent command */
1805 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001806 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 kfree_skb(hdev->sent_cmd);
1808 hdev->sent_cmd = NULL;
1809 }
1810
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001811 kfree_skb(hdev->recv_evt);
1812 hdev->recv_evt = NULL;
1813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 /* After this point our queues are empty
1815 * and no tasks are scheduled. */
1816 hdev->close(hdev);
1817
Johan Hedberg35b973c2013-03-15 17:06:59 -05001818 /* Clear flags */
1819 hdev->flags = 0;
1820 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1821
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001822 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1823 if (hdev->dev_type == HCI_BREDR) {
1824 hci_dev_lock(hdev);
1825 mgmt_powered(hdev, 0);
1826 hci_dev_unlock(hdev);
1827 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001828 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001829
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001830 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001831 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001832
Johan Hedberge59fda82012-02-22 18:11:53 +02001833 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001834 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001835
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 hci_req_unlock(hdev);
1837
1838 hci_dev_put(hdev);
1839 return 0;
1840}
1841
1842int hci_dev_close(__u16 dev)
1843{
1844 struct hci_dev *hdev;
1845 int err;
1846
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001847 hdev = hci_dev_get(dev);
1848 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001850
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001851 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1852 err = -EBUSY;
1853 goto done;
1854 }
1855
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001856 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1857 cancel_delayed_work(&hdev->power_off);
1858
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001860
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001861done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 hci_dev_put(hdev);
1863 return err;
1864}
1865
1866int hci_dev_reset(__u16 dev)
1867{
1868 struct hci_dev *hdev;
1869 int ret = 0;
1870
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001871 hdev = hci_dev_get(dev);
1872 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 return -ENODEV;
1874
1875 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
Marcel Holtmann808a0492013-08-26 20:57:58 -07001877 if (!test_bit(HCI_UP, &hdev->flags)) {
1878 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001880 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001882 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1883 ret = -EBUSY;
1884 goto done;
1885 }
1886
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 /* Drop queues */
1888 skb_queue_purge(&hdev->rx_q);
1889 skb_queue_purge(&hdev->cmd_q);
1890
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001891 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001892 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001894 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
1896 if (hdev->flush)
1897 hdev->flush(hdev);
1898
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001899 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001900 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
1902 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001903 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
1905done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 hci_req_unlock(hdev);
1907 hci_dev_put(hdev);
1908 return ret;
1909}
1910
1911int hci_dev_reset_stat(__u16 dev)
1912{
1913 struct hci_dev *hdev;
1914 int ret = 0;
1915
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001916 hdev = hci_dev_get(dev);
1917 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 return -ENODEV;
1919
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001920 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1921 ret = -EBUSY;
1922 goto done;
1923 }
1924
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1926
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001927done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 return ret;
1930}
1931
1932int hci_dev_cmd(unsigned int cmd, void __user *arg)
1933{
1934 struct hci_dev *hdev;
1935 struct hci_dev_req dr;
1936 int err = 0;
1937
1938 if (copy_from_user(&dr, arg, sizeof(dr)))
1939 return -EFAULT;
1940
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001941 hdev = hci_dev_get(dr.dev_id);
1942 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 return -ENODEV;
1944
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001945 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1946 err = -EBUSY;
1947 goto done;
1948 }
1949
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001950 if (hdev->dev_type != HCI_BREDR) {
1951 err = -EOPNOTSUPP;
1952 goto done;
1953 }
1954
Johan Hedberg56f87902013-10-02 13:43:13 +03001955 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1956 err = -EOPNOTSUPP;
1957 goto done;
1958 }
1959
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 switch (cmd) {
1961 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001962 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1963 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 break;
1965
1966 case HCISETENCRYPT:
1967 if (!lmp_encrypt_capable(hdev)) {
1968 err = -EOPNOTSUPP;
1969 break;
1970 }
1971
1972 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1973 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001974 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1975 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 if (err)
1977 break;
1978 }
1979
Johan Hedberg01178cd2013-03-05 20:37:41 +02001980 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1981 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 break;
1983
1984 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001985 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1986 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 break;
1988
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001989 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001990 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1991 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001992 break;
1993
1994 case HCISETLINKMODE:
1995 hdev->link_mode = ((__u16) dr.dev_opt) &
1996 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1997 break;
1998
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 case HCISETPTYPE:
2000 hdev->pkt_type = (__u16) dr.dev_opt;
2001 break;
2002
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002004 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2005 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 break;
2007
2008 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002009 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2010 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 break;
2012
2013 default:
2014 err = -EINVAL;
2015 break;
2016 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002017
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002018done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 hci_dev_put(hdev);
2020 return err;
2021}
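
/* For illustration, a hedged userspace use of the HCISETSCAN branch
 * above, enabling page and inquiry scan much like "hciconfig hci0
 * piscan"; ctl is a raw HCI socket as in the earlier sketches.
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;				// hci0
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	// connectable + discoverable
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */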
2022
2023int hci_get_dev_list(void __user *arg)
2024{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002025 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 struct hci_dev_list_req *dl;
2027 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 int n = 0, size, err;
2029 __u16 dev_num;
2030
2031 if (get_user(dev_num, (__u16 __user *) arg))
2032 return -EFAULT;
2033
2034 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2035 return -EINVAL;
2036
2037 size = sizeof(*dl) + dev_num * sizeof(*dr);
2038
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002039 dl = kzalloc(size, GFP_KERNEL);
2040 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 return -ENOMEM;
2042
2043 dr = dl->dev_req;
2044
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002045 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002046 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002047 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002048 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002049
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002050 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2051 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002052
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 (dr + n)->dev_id = hdev->id;
2054 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002055
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 if (++n >= dev_num)
2057 break;
2058 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002059 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
2061 dl->dev_num = n;
2062 size = sizeof(*dl) + n * sizeof(*dr);
2063
2064 err = copy_to_user(arg, dl, size);
2065 kfree(dl);
2066
2067 return err ? -EFAULT : 0;
2068}
2069
2070int hci_get_dev_info(void __user *arg)
2071{
2072 struct hci_dev *hdev;
2073 struct hci_dev_info di;
2074 int err = 0;
2075
2076 if (copy_from_user(&di, arg, sizeof(di)))
2077 return -EFAULT;
2078
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002079 hdev = hci_dev_get(di.dev_id);
2080 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 return -ENODEV;
2082
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002083 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002084 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002085
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002086 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2087 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002088
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 strcpy(di.name, hdev->name);
2090 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002091 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 di.flags = hdev->flags;
2093 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002094 if (lmp_bredr_capable(hdev)) {
2095 di.acl_mtu = hdev->acl_mtu;
2096 di.acl_pkts = hdev->acl_pkts;
2097 di.sco_mtu = hdev->sco_mtu;
2098 di.sco_pkts = hdev->sco_pkts;
2099 } else {
2100 di.acl_mtu = hdev->le_mtu;
2101 di.acl_pkts = hdev->le_pkts;
2102 di.sco_mtu = 0;
2103 di.sco_pkts = 0;
2104 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 di.link_policy = hdev->link_policy;
2106 di.link_mode = hdev->link_mode;
2107
2108 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2109 memcpy(&di.features, &hdev->features, sizeof(di.features));
2110
2111 if (copy_to_user(arg, &di, sizeof(di)))
2112 err = -EFAULT;
2113
2114 hci_dev_put(hdev);
2115
2116 return err;
2117}
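
/* A hedged sketch enumerating adapters through the two ioctls above, in
 * the style of hciconfig; HCI_MAX_DEV comes from the BlueZ headers and
 * ctl is a raw HCI socket as before.
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	struct hci_dev_info di;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0) {
 *		for (i = 0; i < dl->dev_num; i++) {
 *			di.dev_id = dr[i].dev_id;
 *			if (ioctl(ctl, HCIGETDEVINFO, &di) == 0)
 *				printf("%s flags 0x%x\n", di.name, di.flags);
 *		}
 *	}
 *	free(dl);
 */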
2118
2119/* ---- Interface to HCI drivers ---- */
2120
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002121static int hci_rfkill_set_block(void *data, bool blocked)
2122{
2123 struct hci_dev *hdev = data;
2124
2125 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2126
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002127 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2128 return -EBUSY;
2129
Johan Hedberg5e130362013-09-13 08:58:17 +03002130 if (blocked) {
2131 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002132 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2133 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002134 } else {
2135 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002136 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002137
2138 return 0;
2139}
2140
2141static const struct rfkill_ops hci_rfkill_ops = {
2142 .set_block = hci_rfkill_set_block,
2143};
2144
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002145static void hci_power_on(struct work_struct *work)
2146{
2147 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002148 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002149
2150 BT_DBG("%s", hdev->name);
2151
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002152 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002153 if (err < 0) {
2154 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002155 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002156 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002157
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002158 /* During the HCI setup phase, a few error conditions are
2159 * ignored and they need to be checked now. If they are still
2160 * valid, it is important to turn the device back off.
2161 */
2162 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2163 (hdev->dev_type == HCI_BREDR &&
2164 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2165 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002166 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2167 hci_dev_do_close(hdev);
2168 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002169 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2170 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002171 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002172
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002173 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002174 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002175}
2176
2177static void hci_power_off(struct work_struct *work)
2178{
Johan Hedberg32435532011-11-07 22:16:04 +02002179 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002180 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002181
2182 BT_DBG("%s", hdev->name);
2183
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002184 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002185}
2186
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002187static void hci_discov_off(struct work_struct *work)
2188{
2189 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002190
2191 hdev = container_of(work, struct hci_dev, discov_off.work);
2192
2193 BT_DBG("%s", hdev->name);
2194
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002195 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002196}
2197
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002198int hci_uuids_clear(struct hci_dev *hdev)
2199{
Johan Hedberg48210022013-01-27 00:31:28 +02002200 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002201
Johan Hedberg48210022013-01-27 00:31:28 +02002202 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2203 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002204 kfree(uuid);
2205 }
2206
2207 return 0;
2208}
2209
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002210int hci_link_keys_clear(struct hci_dev *hdev)
2211{
2212 struct list_head *p, *n;
2213
2214 list_for_each_safe(p, n, &hdev->link_keys) {
2215 struct link_key *key;
2216
2217 key = list_entry(p, struct link_key, list);
2218
2219 list_del(p);
2220 kfree(key);
2221 }
2222
2223 return 0;
2224}
2225
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002226int hci_smp_ltks_clear(struct hci_dev *hdev)
2227{
2228 struct smp_ltk *k, *tmp;
2229
2230 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2231 list_del(&k->list);
2232 kfree(k);
2233 }
2234
2235 return 0;
2236}
2237
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002238struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2239{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002240 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002241
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002242 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002243 if (bacmp(bdaddr, &k->bdaddr) == 0)
2244 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002245
2246 return NULL;
2247}
2248
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302249static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002250 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002251{
2252 /* Legacy key */
2253 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302254 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002255
2256 /* Debug keys are insecure so don't store them persistently */
2257 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302258 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002259
2260 /* Changed combination key and there's no previous one */
2261 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302262 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002263
2264 /* Security mode 3 case */
2265 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302266 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002267
2268	/* Neither the local nor the remote side requested no-bonding */
2269 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302270 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002271
2272 /* Local side had dedicated bonding as requirement */
2273 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302274 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002275
2276 /* Remote side had dedicated bonding as requirement */
2277 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302278 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002279
2280 /* If none of the above criteria match, then don't store the key
2281 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302282 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002283}
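
/* A hedged worked example of the rules above, using the HCI spec
 * authentication requirement values (0x00/0x01 no bonding, 0x02/0x03
 * dedicated bonding, 0x04/0x05 general bonding): a pairing where both
 * sides asked for general bonding (auth_type 0x04, remote_auth 0x04)
 * produces a key that is kept, while a debug combination key is always
 * dropped once the connection goes away, whatever the bonding flavour.
 */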
2284
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002285struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002286{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002287 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002288
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002289 list_for_each_entry(k, &hdev->long_term_keys, list) {
2290 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002291 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002292 continue;
2293
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002294 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002295 }
2296
2297 return NULL;
2298}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002299
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002300struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002301 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002302{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002303 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002304
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002305 list_for_each_entry(k, &hdev->long_term_keys, list)
2306 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002307 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002308 return k;
2309
2310 return NULL;
2311}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002312
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002313int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002314 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002315{
2316 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302317 u8 old_key_type;
2318 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002319
2320 old_key = hci_find_link_key(hdev, bdaddr);
2321 if (old_key) {
2322 old_key_type = old_key->type;
2323 key = old_key;
2324 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002325 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002326 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2327 if (!key)
2328 return -ENOMEM;
2329 list_add(&key->list, &hdev->link_keys);
2330 }
2331
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002332 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002333
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002334 /* Some buggy controller combinations generate a changed
2335 * combination key for legacy pairing even when there's no
2336 * previous key */
2337 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002338 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002339 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002340 if (conn)
2341 conn->key_type = type;
2342 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002343
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002344 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002345 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002346 key->pin_len = pin_len;
2347
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002348 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002349 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002350 else
2351 key->type = type;
2352
Johan Hedberg4df378a2011-04-28 11:29:03 -07002353 if (!new_key)
2354 return 0;
2355
2356 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2357
Johan Hedberg744cf192011-11-08 20:40:14 +02002358 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002359
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302360 if (conn)
2361 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002362
2363 return 0;
2364}
2365
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002366int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002367 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002368 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002369{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002370 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002371
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002372 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2373 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002374
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002375 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2376 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002377 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002378 else {
2379 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002380 if (!key)
2381 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002382 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002383 }
2384
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002385 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002386 key->bdaddr_type = addr_type;
2387 memcpy(key->val, tk, sizeof(key->val));
2388 key->authenticated = authenticated;
2389 key->ediv = ediv;
2390 key->enc_size = enc_size;
2391 key->type = type;
2392 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002393
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002394 if (!new_key)
2395 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002396
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002397 if (type & HCI_SMP_LTK)
2398 mgmt_new_ltk(hdev, key, 1);
2399
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002400 return 0;
2401}
2402
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002403int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2404{
2405 struct link_key *key;
2406
2407 key = hci_find_link_key(hdev, bdaddr);
2408 if (!key)
2409 return -ENOENT;
2410
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002411 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002412
2413 list_del(&key->list);
2414 kfree(key);
2415
2416 return 0;
2417}
2418
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002419int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2420{
2421 struct smp_ltk *k, *tmp;
2422
2423 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2424 if (bacmp(bdaddr, &k->bdaddr))
2425 continue;
2426
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002427 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002428
2429 list_del(&k->list);
2430 kfree(k);
2431 }
2432
2433 return 0;
2434}
2435
Ville Tervo6bd32322011-02-16 16:32:41 +02002436/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002437static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002438{
2439 struct hci_dev *hdev = (void *) arg;
2440
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002441 if (hdev->sent_cmd) {
2442 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2443 u16 opcode = __le16_to_cpu(sent->opcode);
2444
2445 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2446 } else {
2447 BT_ERR("%s command tx timeout", hdev->name);
2448 }
2449
Ville Tervo6bd32322011-02-16 16:32:41 +02002450 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002451 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002452}
2453
Szymon Janc2763eda2011-03-22 13:12:22 +01002454struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002455 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002456{
2457 struct oob_data *data;
2458
2459 list_for_each_entry(data, &hdev->remote_oob_data, list)
2460 if (bacmp(bdaddr, &data->bdaddr) == 0)
2461 return data;
2462
2463 return NULL;
2464}
2465
2466int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2467{
2468 struct oob_data *data;
2469
2470 data = hci_find_remote_oob_data(hdev, bdaddr);
2471 if (!data)
2472 return -ENOENT;
2473
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002474 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002475
2476 list_del(&data->list);
2477 kfree(data);
2478
2479 return 0;
2480}
2481
2482int hci_remote_oob_data_clear(struct hci_dev *hdev)
2483{
2484 struct oob_data *data, *n;
2485
2486 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2487 list_del(&data->list);
2488 kfree(data);
2489 }
2490
2491 return 0;
2492}
2493
2494int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002495 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002496{
2497 struct oob_data *data;
2498
2499 data = hci_find_remote_oob_data(hdev, bdaddr);
2500
2501 if (!data) {
2502 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2503 if (!data)
2504 return -ENOMEM;
2505
2506 bacpy(&data->bdaddr, bdaddr);
2507 list_add(&data->list, &hdev->remote_oob_data);
2508 }
2509
2510 memcpy(data->hash, hash, sizeof(data->hash));
2511 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2512
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002513 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002514
2515 return 0;
2516}
2517
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002518struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2519 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002520{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002521 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002522
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002523 list_for_each_entry(b, &hdev->blacklist, list) {
2524 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002525 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002526 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002527
2528 return NULL;
2529}
2530
2531int hci_blacklist_clear(struct hci_dev *hdev)
2532{
2533 struct list_head *p, *n;
2534
2535 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002536 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002537
2538 list_del(p);
2539 kfree(b);
2540 }
2541
2542 return 0;
2543}
2544
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002545int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002546{
2547 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002548
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002549 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002550 return -EBADF;
2551
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002552 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002553 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002554
2555 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002556 if (!entry)
2557 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002558
2559 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002560 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002561
2562 list_add(&entry->list, &hdev->blacklist);
2563
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002564 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002565}
2566
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002567int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002568{
2569 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002570
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002571 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002572 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002573
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002574 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002575 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002576 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002577
2578 list_del(&entry->list);
2579 kfree(entry);
2580
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002581 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002582}
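
/* A minimal in-kernel sketch (illustrative name, modeled on the mgmt
 * Block Device handler) of how these helpers are meant to be called:
 * hdev->lock must be held via hci_dev_lock() around the list update.
 *
 *	static int example_block_peer(struct hci_dev *hdev, bdaddr_t *peer)
 *	{
 *		int err;
 *
 *		hci_dev_lock(hdev);
 *		err = hci_blacklist_add(hdev, peer, BDADDR_BREDR);
 *		hci_dev_unlock(hdev);
 *
 *		return err;
 *	}
 */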
2583
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002584static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002585{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002586 if (status) {
2587 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002588
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002589 hci_dev_lock(hdev);
2590 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2591 hci_dev_unlock(hdev);
2592 return;
2593 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002594}
2595
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002596static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002597{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002598 /* General inquiry access code (GIAC) */
2599 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2600 struct hci_request req;
2601 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002602 int err;
2603
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002604 if (status) {
2605 BT_ERR("Failed to disable LE scanning: status %d", status);
2606 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002607 }
2608
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002609 switch (hdev->discovery.type) {
2610 case DISCOV_TYPE_LE:
2611 hci_dev_lock(hdev);
2612 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2613 hci_dev_unlock(hdev);
2614 break;
2615
2616 case DISCOV_TYPE_INTERLEAVED:
2617 hci_req_init(&req, hdev);
2618
2619 memset(&cp, 0, sizeof(cp));
2620 memcpy(&cp.lap, lap, sizeof(cp.lap));
2621 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2622 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2623
2624 hci_dev_lock(hdev);
2625
2626 hci_inquiry_cache_flush(hdev);
2627
2628 err = hci_req_run(&req, inquiry_complete);
2629 if (err) {
2630 BT_ERR("Inquiry request failed: err %d", err);
2631 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2632 }
2633
2634 hci_dev_unlock(hdev);
2635 break;
2636 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002637}
2638
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002639static void le_scan_disable_work(struct work_struct *work)
2640{
2641 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002642 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002643 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002644 struct hci_request req;
2645 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002646
2647 BT_DBG("%s", hdev->name);
2648
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002649 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002650
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002651 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002652 cp.enable = LE_SCAN_DISABLE;
2653 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002654
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002655 err = hci_req_run(&req, le_scan_disable_work_complete);
2656 if (err)
2657 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002658}
2659
David Herrmann9be0dab2012-04-22 14:39:57 +02002660/* Alloc HCI device */
2661struct hci_dev *hci_alloc_dev(void)
2662{
2663 struct hci_dev *hdev;
2664
2665 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2666 if (!hdev)
2667 return NULL;
2668
David Herrmannb1b813d2012-04-22 14:39:58 +02002669 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2670 hdev->esco_type = (ESCO_HV1);
2671 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002672 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2673 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002674 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2675 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002676
David Herrmannb1b813d2012-04-22 14:39:58 +02002677 hdev->sniff_max_interval = 800;
2678 hdev->sniff_min_interval = 80;
2679
Marcel Holtmannbef64732013-10-11 08:23:19 -07002680 hdev->le_scan_interval = 0x0060;
2681 hdev->le_scan_window = 0x0030;
2682
David Herrmannb1b813d2012-04-22 14:39:58 +02002683 mutex_init(&hdev->lock);
2684 mutex_init(&hdev->req_lock);
2685
2686 INIT_LIST_HEAD(&hdev->mgmt_pending);
2687 INIT_LIST_HEAD(&hdev->blacklist);
2688 INIT_LIST_HEAD(&hdev->uuids);
2689 INIT_LIST_HEAD(&hdev->link_keys);
2690 INIT_LIST_HEAD(&hdev->long_term_keys);
2691 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002692 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002693
2694 INIT_WORK(&hdev->rx_work, hci_rx_work);
2695 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2696 INIT_WORK(&hdev->tx_work, hci_tx_work);
2697 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002698
David Herrmannb1b813d2012-04-22 14:39:58 +02002699 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2700 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2701 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2702
David Herrmannb1b813d2012-04-22 14:39:58 +02002703 skb_queue_head_init(&hdev->rx_q);
2704 skb_queue_head_init(&hdev->cmd_q);
2705 skb_queue_head_init(&hdev->raw_q);
2706
2707 init_waitqueue_head(&hdev->req_wait_q);
2708
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002709 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002710
David Herrmannb1b813d2012-04-22 14:39:58 +02002711 hci_init_sysfs(hdev);
2712 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002713
2714 return hdev;
2715}
2716EXPORT_SYMBOL(hci_alloc_dev);
2717
2718/* Free HCI device */
2719void hci_free_dev(struct hci_dev *hdev)
2720{
David Herrmann9be0dab2012-04-22 14:39:57 +02002721 /* will free via device release */
2722 put_device(&hdev->dev);
2723}
2724EXPORT_SYMBOL(hci_free_dev);
2725
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726/* Register HCI device */
2727int hci_register_dev(struct hci_dev *hdev)
2728{
David Herrmannb1b813d2012-04-22 14:39:58 +02002729 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730
David Herrmann010666a2012-01-07 15:47:07 +01002731 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732 return -EINVAL;
2733
Mat Martineau08add512011-11-02 16:18:36 -07002734 /* Do not allow HCI_AMP devices to register at index 0,
2735 * so the index can be used as the AMP controller ID.
2736 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002737 switch (hdev->dev_type) {
2738 case HCI_BREDR:
2739 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2740 break;
2741 case HCI_AMP:
2742 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2743 break;
2744 default:
2745 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002747
Sasha Levin3df92b32012-05-27 22:36:56 +02002748 if (id < 0)
2749 return id;
2750
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 sprintf(hdev->name, "hci%d", id);
2752 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002753
2754 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2755
Kees Cookd8537542013-07-03 15:04:57 -07002756 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2757 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002758 if (!hdev->workqueue) {
2759 error = -ENOMEM;
2760 goto err;
2761 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002762
Kees Cookd8537542013-07-03 15:04:57 -07002763 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2764 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002765 if (!hdev->req_workqueue) {
2766 destroy_workqueue(hdev->workqueue);
2767 error = -ENOMEM;
2768 goto err;
2769 }
2770
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002771 if (!IS_ERR_OR_NULL(bt_debugfs))
2772 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2773
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002774 dev_set_name(&hdev->dev, "%s", hdev->name);
2775
2776 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002777 if (error < 0)
2778 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002780 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002781 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2782 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002783 if (hdev->rfkill) {
2784 if (rfkill_register(hdev->rfkill) < 0) {
2785 rfkill_destroy(hdev->rfkill);
2786 hdev->rfkill = NULL;
2787 }
2788 }
2789
Johan Hedberg5e130362013-09-13 08:58:17 +03002790 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2791 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2792
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002793 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002794 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002795
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002796 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002797 /* Assume BR/EDR support until proven otherwise (such as
2798	 * through reading supported features during init).
2799 */
2800 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2801 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002802
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002803 write_lock(&hci_dev_list_lock);
2804 list_add(&hdev->list, &hci_dev_list);
2805 write_unlock(&hci_dev_list_lock);
2806
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002808 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809
Johan Hedberg19202572013-01-14 22:33:51 +02002810 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002811
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002813
David Herrmann33ca9542011-10-08 14:58:49 +02002814err_wqueue:
2815 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002816 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002817err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002818 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002819
David Herrmann33ca9542011-10-08 14:58:49 +02002820 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821}
2822EXPORT_SYMBOL(hci_register_dev);
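
/* Illustrative sketch (not part of hci_core.c): the minimal driver-side
 * sequence for bringing a controller under the core. The foo_* names
 * and the HCI_USB bus choice are assumptions for the example.
 */
static int foo_open(struct hci_dev *hdev)
{
	return 0;	/* power up / claim the transport */
}

static int foo_close(struct hci_dev *hdev)
{
	return 0;	/* release the transport */
}

static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* a real driver hands the skb to its hardware here */
	kfree_skb(skb);
	return 0;
}

static int foo_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;		/* transport specific */
	hdev->dev_type = HCI_BREDR;	/* BR/EDR may take index 0 */
	hdev->open = foo_open;
	hdev->close = foo_close;
	hdev->send = foo_send;

	err = hci_register_dev(hdev);	/* returns the assigned index */
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}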
2823
2824/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002825void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826{
Sasha Levin3df92b32012-05-27 22:36:56 +02002827 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002828
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002829 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
Johan Hovold94324962012-03-15 14:48:41 +01002831 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2832
Sasha Levin3df92b32012-05-27 22:36:56 +02002833 id = hdev->id;
2834
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002835 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002837 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
2839 hci_dev_do_close(hdev);
2840
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302841 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002842 kfree_skb(hdev->reassembly[i]);
2843
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002844 cancel_work_sync(&hdev->power_on);
2845
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002846 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002847 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002848 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002849 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002850 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002851 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002852
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002853 /* mgmt_index_removed should take care of emptying the
2854 * pending list */
2855 BUG_ON(!list_empty(&hdev->mgmt_pending));
2856
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 hci_notify(hdev, HCI_DEV_UNREG);
2858
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002859 if (hdev->rfkill) {
2860 rfkill_unregister(hdev->rfkill);
2861 rfkill_destroy(hdev->rfkill);
2862 }
2863
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002864 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002865
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002866 debugfs_remove_recursive(hdev->debugfs);
2867
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002868 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002869 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002870
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002871 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002872 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002873 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002874 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002875 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002876 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002877 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002878
David Herrmanndc946bd2012-01-07 15:47:24 +01002879 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002880
2881 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882}
2883EXPORT_SYMBOL(hci_unregister_dev);
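
/* Illustrative sketch (not part of hci_core.c): teardown mirrors
 * registration; foo_remove is a hypothetical driver disconnect path.
 */
static void foo_remove(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);	/* closes the device, removes sysfs/debugfs */
	hci_free_dev(hdev);		/* drops the final reference */
}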
2884
2885/* Suspend HCI device */
2886int hci_suspend_dev(struct hci_dev *hdev)
2887{
2888 hci_notify(hdev, HCI_DEV_SUSPEND);
2889 return 0;
2890}
2891EXPORT_SYMBOL(hci_suspend_dev);
2892
2893/* Resume HCI device */
2894int hci_resume_dev(struct hci_dev *hdev)
2895{
2896 hci_notify(hdev, HCI_DEV_RESUME);
2897 return 0;
2898}
2899EXPORT_SYMBOL(hci_resume_dev);
2900
Marcel Holtmann76bca882009-11-18 00:40:39 +01002901/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002902int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002903{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002904 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002905 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002906 kfree_skb(skb);
2907 return -ENXIO;
2908 }
2909
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002910 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002911 bt_cb(skb)->incoming = 1;
2912
2913 /* Time stamp */
2914 __net_timestamp(skb);
2915
Marcel Holtmann76bca882009-11-18 00:40:39 +01002916 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002917 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002918
Marcel Holtmann76bca882009-11-18 00:40:39 +01002919 return 0;
2920}
2921EXPORT_SYMBOL(hci_recv_frame);
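
/* Illustrative sketch (not part of hci_core.c): how a driver feeds one
 * complete event packet to the core; foo_rx_event and the GFP_ATOMIC
 * (interrupt context) assumption are illustrative.
 */
static int foo_rx_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* tag before handing over */
	memcpy(skb_put(skb, len), buf, len);

	/* hci_recv_frame() consumes the skb, even on error */
	return hci_recv_frame(hdev, skb);
}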
2922
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302923static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002924 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302925{
2926 int len = 0;
2927 int hlen = 0;
2928 int remain = count;
2929 struct sk_buff *skb;
2930 struct bt_skb_cb *scb;
2931
2932 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002933 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302934 return -EILSEQ;
2935
2936 skb = hdev->reassembly[index];
2937
2938 if (!skb) {
2939 switch (type) {
2940 case HCI_ACLDATA_PKT:
2941 len = HCI_MAX_FRAME_SIZE;
2942 hlen = HCI_ACL_HDR_SIZE;
2943 break;
2944 case HCI_EVENT_PKT:
2945 len = HCI_MAX_EVENT_SIZE;
2946 hlen = HCI_EVENT_HDR_SIZE;
2947 break;
2948 case HCI_SCODATA_PKT:
2949 len = HCI_MAX_SCO_SIZE;
2950 hlen = HCI_SCO_HDR_SIZE;
2951 break;
2952 }
2953
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002954 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302955 if (!skb)
2956 return -ENOMEM;
2957
2958 scb = (void *) skb->cb;
2959 scb->expect = hlen;
2960 scb->pkt_type = type;
2961
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302962 hdev->reassembly[index] = skb;
2963 }
2964
2965 while (count) {
2966 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002967 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302968
2969 memcpy(skb_put(skb, len), data, len);
2970
2971 count -= len;
2972 data += len;
2973 scb->expect -= len;
2974 remain = count;
2975
2976 switch (type) {
2977 case HCI_EVENT_PKT:
2978 if (skb->len == HCI_EVENT_HDR_SIZE) {
2979 struct hci_event_hdr *h = hci_event_hdr(skb);
2980 scb->expect = h->plen;
2981
2982 if (skb_tailroom(skb) < scb->expect) {
2983 kfree_skb(skb);
2984 hdev->reassembly[index] = NULL;
2985 return -ENOMEM;
2986 }
2987 }
2988 break;
2989
2990 case HCI_ACLDATA_PKT:
2991 if (skb->len == HCI_ACL_HDR_SIZE) {
2992 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2993 scb->expect = __le16_to_cpu(h->dlen);
2994
2995 if (skb_tailroom(skb) < scb->expect) {
2996 kfree_skb(skb);
2997 hdev->reassembly[index] = NULL;
2998 return -ENOMEM;
2999 }
3000 }
3001 break;
3002
3003 case HCI_SCODATA_PKT:
3004 if (skb->len == HCI_SCO_HDR_SIZE) {
3005 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3006 scb->expect = h->dlen;
3007
3008 if (skb_tailroom(skb) < scb->expect) {
3009 kfree_skb(skb);
3010 hdev->reassembly[index] = NULL;
3011 return -ENOMEM;
3012 }
3013 }
3014 break;
3015 }
3016
3017 if (scb->expect == 0) {
3018 /* Complete frame */
3019
3020 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003021 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303022
3023 hdev->reassembly[index] = NULL;
3024 return remain;
3025 }
3026 }
3027
3028 return remain;
3029}
3030
Marcel Holtmannef222012007-07-11 06:42:04 +02003031int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3032{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303033 int rem = 0;
3034
Marcel Holtmannef222012007-07-11 06:42:04 +02003035 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3036 return -EILSEQ;
3037
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003038 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003039 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303040 if (rem < 0)
3041 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003042
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303043 data += (count - rem);
3044 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003045 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003046
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303047 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003048}
3049EXPORT_SYMBOL(hci_recv_fragment);
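
/* Illustrative sketch (not part of hci_core.c): drivers whose hardware
 * indicates packets in arbitrary chunks can let the core reassemble
 * them; foo_rx_chunk is hypothetical.
 */
static void foo_rx_chunk(struct hci_dev *hdev, int type, void *buf, int len)
{
	/* returns the leftover byte count (normally 0) or a negative error */
	if (hci_recv_fragment(hdev, type, buf, len) < 0)
		BT_ERR("%s reassembly failed", hdev->name);
}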
3050
Suraj Sumangala99811512010-07-14 13:02:19 +05303051#define STREAM_REASSEMBLY 0
3052
3053int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3054{
3055 int type;
3056 int rem = 0;
3057
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003058 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303059 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3060
3061 if (!skb) {
3062 struct { char type; } *pkt;
3063
3064 /* Start of the frame */
3065 pkt = data;
3066 type = pkt->type;
3067
3068 data++;
3069 count--;
3070 } else
3071 type = bt_cb(skb)->pkt_type;
3072
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003073 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003074 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303075 if (rem < 0)
3076 return rem;
3077
3078 data += (count - rem);
3079 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003080 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303081
3082 return rem;
3083}
3084EXPORT_SYMBOL(hci_recv_stream_fragment);
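
/* Illustrative sketch (not part of hci_core.c): H4-style UART drivers
 * push the raw byte stream, where each frame is prefixed by its
 * packet-type byte; foo_uart_rx is hypothetical.
 */
static void foo_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}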
3085
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086/* ---- Interface to upper protocols ---- */
3087
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088int hci_register_cb(struct hci_cb *cb)
3089{
3090 BT_DBG("%p name %s", cb, cb->name);
3091
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003092 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003094 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095
3096 return 0;
3097}
3098EXPORT_SYMBOL(hci_register_cb);
3099
3100int hci_unregister_cb(struct hci_cb *cb)
3101{
3102 BT_DBG("%p name %s", cb, cb->name);
3103
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003104 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003106 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107
3108 return 0;
3109}
3110EXPORT_SYMBOL(hci_unregister_cb);
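
/* Illustrative sketch (not part of hci_core.c): an upper layer hooks
 * link-level notifications through struct hci_cb. The security_cfm
 * member and the foo_* names are assumptions about this kernel
 * generation's callback set.
 */
static void foo_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
}

static struct hci_cb foo_cb = {
	.name		= "foo",
	.security_cfm	= foo_security_cfm,
};

/* call hci_register_cb(&foo_cb) on init, hci_unregister_cb(&foo_cb) on exit */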
3111
Marcel Holtmann51086992013-10-10 14:54:19 -07003112static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003114 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003116 /* Time stamp */
3117 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003119 /* Send copy to monitor */
3120 hci_send_to_monitor(hdev, skb);
3121
3122 if (atomic_read(&hdev->promisc)) {
3123 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003124 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 }
3126
3127	/* Get rid of the skb owner prior to sending it to the driver. */
3128 skb_orphan(skb);
3129
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003130 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003131 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132}
3133
Johan Hedberg3119ae92013-03-05 20:37:44 +02003134void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3135{
3136 skb_queue_head_init(&req->cmd_q);
3137 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003138 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003139}
3140
3141int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3142{
3143 struct hci_dev *hdev = req->hdev;
3144 struct sk_buff *skb;
3145 unsigned long flags;
3146
3147 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3148
Andre Guedes5d73e032013-03-08 11:20:16 -03003149	/* If an error occurred during request building, remove all HCI
3150 * commands queued on the HCI request queue.
3151 */
3152 if (req->err) {
3153 skb_queue_purge(&req->cmd_q);
3154 return req->err;
3155 }
3156
Johan Hedberg3119ae92013-03-05 20:37:44 +02003157 /* Do not allow empty requests */
3158 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003159 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003160
3161 skb = skb_peek_tail(&req->cmd_q);
3162 bt_cb(skb)->req.complete = complete;
3163
3164 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3165 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3166 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3167
3168 queue_work(hdev->workqueue, &hdev->cmd_work);
3169
3170 return 0;
3171}
3172
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003173static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003174 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175{
3176 int len = HCI_COMMAND_HDR_SIZE + plen;
3177 struct hci_command_hdr *hdr;
3178 struct sk_buff *skb;
3179
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003181 if (!skb)
3182 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183
3184 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003185 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186 hdr->plen = plen;
3187
3188 if (plen)
3189 memcpy(skb_put(skb, plen), param, plen);
3190
3191 BT_DBG("skb len %d", skb->len);
3192
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003193 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003194
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003195 return skb;
3196}
3197
3198/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003199int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3200 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003201{
3202 struct sk_buff *skb;
3203
3204 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3205
3206 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3207 if (!skb) {
3208 BT_ERR("%s no memory for command", hdev->name);
3209 return -ENOMEM;
3210 }
3211
Johan Hedberg11714b32013-03-05 20:37:47 +02003212	/* Stand-alone HCI commands must be flagged as
3213 * single-command requests.
3214 */
3215 bt_cb(skb)->req.start = true;
3216
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003218 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219
3220 return 0;
3221}
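
/* Illustrative sketch (not part of hci_core.c): firing a stand-alone
 * command; the core flags it as a single-command request.
 * HCI_OP_INQUIRY_CANCEL is a real opcode, foo_* is hypothetical.
 */
static int foo_cancel_inquiry(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}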
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222
Johan Hedberg71c76a12013-03-05 20:37:46 +02003223/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003224void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3225 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003226{
3227 struct hci_dev *hdev = req->hdev;
3228 struct sk_buff *skb;
3229
3230 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3231
Andre Guedes34739c12013-03-08 11:20:18 -03003232	/* If an error occurred during request building, there is no point in
3233 * queueing the HCI command. We can simply return.
3234 */
3235 if (req->err)
3236 return;
3237
Johan Hedberg71c76a12013-03-05 20:37:46 +02003238 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3239 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003240 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3241 hdev->name, opcode);
3242 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003243 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003244 }
3245
3246 if (skb_queue_empty(&req->cmd_q))
3247 bt_cb(skb)->req.start = true;
3248
Johan Hedberg02350a72013-04-03 21:50:29 +03003249 bt_cb(skb)->req.event = event;
3250
Johan Hedberg71c76a12013-03-05 20:37:46 +02003251 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003252}
3253
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003254void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3255 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003256{
3257 hci_req_add_ev(req, opcode, plen, param, 0);
3258}
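
/* Illustrative sketch (not part of hci_core.c): building and running a
 * request; the completion callback fires once, after the last queued
 * command completes. The foo_* names are hypothetical, HCI_OP_RESET is
 * a real opcode.
 */
static void foo_req_done(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int foo_run_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, foo_req_done);
}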
3259
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003261void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262{
3263 struct hci_command_hdr *hdr;
3264
3265 if (!hdev->sent_cmd)
3266 return NULL;
3267
3268 hdr = (void *) hdev->sent_cmd->data;
3269
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003270 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271 return NULL;
3272
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003273 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274
3275 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3276}
3277
3278/* Send ACL data */
3279static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3280{
3281 struct hci_acl_hdr *hdr;
3282 int len = skb->len;
3283
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003284 skb_push(skb, HCI_ACL_HDR_SIZE);
3285 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003286 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003287 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3288 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289}
3290
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003291static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003292 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003294 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 struct hci_dev *hdev = conn->hdev;
3296 struct sk_buff *list;
3297
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003298 skb->len = skb_headlen(skb);
3299 skb->data_len = 0;
3300
3301 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003302
3303 switch (hdev->dev_type) {
3304 case HCI_BREDR:
3305 hci_add_acl_hdr(skb, conn->handle, flags);
3306 break;
3307 case HCI_AMP:
3308 hci_add_acl_hdr(skb, chan->handle, flags);
3309 break;
3310 default:
3311 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3312 return;
3313 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003314
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003315 list = skb_shinfo(skb)->frag_list;
3316 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317		/* Non-fragmented */
3318 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3319
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003320 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 } else {
3322 /* Fragmented */
3323 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3324
3325 skb_shinfo(skb)->frag_list = NULL;
3326
3327 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003328 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003330 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003331
3332 flags &= ~ACL_START;
3333 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 do {
3335 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003336
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003337 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003338 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
3340 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3341
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003342 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 } while (list);
3344
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003345 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003347}
3348
3349void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3350{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003351 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003352
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003353 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003354
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003355 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003357 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359
3360/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003361void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362{
3363 struct hci_dev *hdev = conn->hdev;
3364 struct hci_sco_hdr hdr;
3365
3366 BT_DBG("%s len %d", hdev->name, skb->len);
3367
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003368 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 hdr.dlen = skb->len;
3370
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003371 skb_push(skb, HCI_SCO_HDR_SIZE);
3372 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003373 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003375 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003376
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003378 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380
3381/* ---- HCI TX task (outgoing data) ---- */
3382
3383/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003384static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3385 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386{
3387 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003388 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003389 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003391	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003393
3394 rcu_read_lock();
3395
3396 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003397 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003399
3400 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3401 continue;
3402
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 num++;
3404
3405 if (c->sent < min) {
3406 min = c->sent;
3407 conn = c;
3408 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003409
3410 if (hci_conn_num(hdev, type) == num)
3411 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 }
3413
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003414 rcu_read_unlock();
3415
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003417 int cnt, q;
3418
3419 switch (conn->type) {
3420 case ACL_LINK:
3421 cnt = hdev->acl_cnt;
3422 break;
3423 case SCO_LINK:
3424 case ESCO_LINK:
3425 cnt = hdev->sco_cnt;
3426 break;
3427 case LE_LINK:
3428 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3429 break;
3430 default:
3431 cnt = 0;
3432 BT_ERR("Unknown link type");
3433 }
3434
3435 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 *quote = q ? q : 1;
3437 } else
3438 *quote = 0;
3439
3440 BT_DBG("conn %p quote %d", conn, *quote);
3441 return conn;
3442}
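
/* Worked example (illustrative figures): with acl_cnt 8 and three ACL
 * connections that all have queued data, the least-busy connection is
 * picked and granted a quote of 8 / 3 = 2 packets for this round.
 */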
3443
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003444static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445{
3446 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003447 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448
Ville Tervobae1f5d92011-02-10 22:38:53 -03003449 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003451 rcu_read_lock();
3452
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003454 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003455 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003456 BT_ERR("%s killing stalled connection %pMR",
3457 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003458 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 }
3460 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003461
3462 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463}
3464
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003465static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3466 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003467{
3468 struct hci_conn_hash *h = &hdev->conn_hash;
3469 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003470 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003471 struct hci_conn *conn;
3472 int cnt, q, conn_num = 0;
3473
3474 BT_DBG("%s", hdev->name);
3475
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003476 rcu_read_lock();
3477
3478 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003479 struct hci_chan *tmp;
3480
3481 if (conn->type != type)
3482 continue;
3483
3484 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3485 continue;
3486
3487 conn_num++;
3488
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003489 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003490 struct sk_buff *skb;
3491
3492 if (skb_queue_empty(&tmp->data_q))
3493 continue;
3494
3495 skb = skb_peek(&tmp->data_q);
3496 if (skb->priority < cur_prio)
3497 continue;
3498
3499 if (skb->priority > cur_prio) {
3500 num = 0;
3501 min = ~0;
3502 cur_prio = skb->priority;
3503 }
3504
3505 num++;
3506
3507 if (conn->sent < min) {
3508 min = conn->sent;
3509 chan = tmp;
3510 }
3511 }
3512
3513 if (hci_conn_num(hdev, type) == conn_num)
3514 break;
3515 }
3516
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003517 rcu_read_unlock();
3518
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003519 if (!chan)
3520 return NULL;
3521
3522 switch (chan->conn->type) {
3523 case ACL_LINK:
3524 cnt = hdev->acl_cnt;
3525 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003526 case AMP_LINK:
3527 cnt = hdev->block_cnt;
3528 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003529 case SCO_LINK:
3530 case ESCO_LINK:
3531 cnt = hdev->sco_cnt;
3532 break;
3533 case LE_LINK:
3534 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3535 break;
3536 default:
3537 cnt = 0;
3538 BT_ERR("Unknown link type");
3539 }
3540
3541 q = cnt / num;
3542 *quote = q ? q : 1;
3543 BT_DBG("chan %p quote %d", chan, *quote);
3544 return chan;
3545}
3546
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003547static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3548{
3549 struct hci_conn_hash *h = &hdev->conn_hash;
3550 struct hci_conn *conn;
3551 int num = 0;
3552
3553 BT_DBG("%s", hdev->name);
3554
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003555 rcu_read_lock();
3556
3557 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003558 struct hci_chan *chan;
3559
3560 if (conn->type != type)
3561 continue;
3562
3563 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3564 continue;
3565
3566 num++;
3567
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003568 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003569 struct sk_buff *skb;
3570
3571 if (chan->sent) {
3572 chan->sent = 0;
3573 continue;
3574 }
3575
3576 if (skb_queue_empty(&chan->data_q))
3577 continue;
3578
3579 skb = skb_peek(&chan->data_q);
3580 if (skb->priority >= HCI_PRIO_MAX - 1)
3581 continue;
3582
3583 skb->priority = HCI_PRIO_MAX - 1;
3584
3585 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003586 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003587 }
3588
3589 if (hci_conn_num(hdev, type) == num)
3590 break;
3591 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003592
3593 rcu_read_unlock();
3594
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003595}
3596
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003597static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3598{
3599 /* Calculate count of blocks used by this packet */
3600 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3601}
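
/* Worked example (illustrative figures): with block_len 256, a
 * 1021-byte ACL payload plus the 4-byte ACL header gives skb->len 1025,
 * so DIV_ROUND_UP(1025 - 4, 256) = 4 blocks are consumed.
 */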
3602
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003603static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 if (!test_bit(HCI_RAW, &hdev->flags)) {
3606		/* ACL tx timeout must be longer than the maximum
3607 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003608 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003609 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003610 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003612}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003614static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003615{
3616 unsigned int cnt = hdev->acl_cnt;
3617 struct hci_chan *chan;
3618 struct sk_buff *skb;
3619 int quote;
3620
3621 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003622
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003623 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003624 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003625 u32 priority = (skb_peek(&chan->data_q))->priority;
3626 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003627 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003628 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003629
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003630 /* Stop if priority has changed */
3631 if (skb->priority < priority)
3632 break;
3633
3634 skb = skb_dequeue(&chan->data_q);
3635
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003636 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003637 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003638
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003639 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 hdev->acl_last_tx = jiffies;
3641
3642 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003643 chan->sent++;
3644 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 }
3646 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003647
3648 if (cnt != hdev->acl_cnt)
3649 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650}
3651
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003652static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003653{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003654 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003655 struct hci_chan *chan;
3656 struct sk_buff *skb;
3657 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003658 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003659
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003660 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003661
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003662 BT_DBG("%s", hdev->name);
3663
3664 if (hdev->dev_type == HCI_AMP)
3665 type = AMP_LINK;
3666 else
3667 type = ACL_LINK;
3668
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003669 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003670 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003671 u32 priority = (skb_peek(&chan->data_q))->priority;
3672 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3673 int blocks;
3674
3675 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003676 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003677
3678 /* Stop if priority has changed */
3679 if (skb->priority < priority)
3680 break;
3681
3682 skb = skb_dequeue(&chan->data_q);
3683
3684 blocks = __get_blocks(hdev, skb);
3685 if (blocks > hdev->block_cnt)
3686 return;
3687
3688 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003689 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003690
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003691 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003692 hdev->acl_last_tx = jiffies;
3693
3694 hdev->block_cnt -= blocks;
3695 quote -= blocks;
3696
3697 chan->sent += blocks;
3698 chan->conn->sent += blocks;
3699 }
3700 }
3701
3702 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003703 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003704}
3705
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003706static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003707{
3708 BT_DBG("%s", hdev->name);
3709
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003710 /* No ACL link over BR/EDR controller */
3711 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3712 return;
3713
3714 /* No AMP link over AMP controller */
3715 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003716 return;
3717
3718 switch (hdev->flow_ctl_mode) {
3719 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3720 hci_sched_acl_pkt(hdev);
3721 break;
3722
3723 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3724 hci_sched_acl_blk(hdev);
3725 break;
3726 }
3727}
3728
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003730static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731{
3732 struct hci_conn *conn;
3733 struct sk_buff *skb;
3734 int quote;
3735
3736 BT_DBG("%s", hdev->name);
3737
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003738 if (!hci_conn_num(hdev, SCO_LINK))
3739 return;
3740
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3742 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3743 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003744 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745
3746 conn->sent++;
3747 if (conn->sent == ~0)
3748 conn->sent = 0;
3749 }
3750 }
3751}
3752
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003753static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003754{
3755 struct hci_conn *conn;
3756 struct sk_buff *skb;
3757 int quote;
3758
3759 BT_DBG("%s", hdev->name);
3760
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003761 if (!hci_conn_num(hdev, ESCO_LINK))
3762 return;
3763
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003764 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3765 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003766 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3767 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003768 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003769
3770 conn->sent++;
3771 if (conn->sent == ~0)
3772 conn->sent = 0;
3773 }
3774 }
3775}
3776
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003777static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003778{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003779 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003780 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003781 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003782
3783 BT_DBG("%s", hdev->name);
3784
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003785 if (!hci_conn_num(hdev, LE_LINK))
3786 return;
3787
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003788 if (!test_bit(HCI_RAW, &hdev->flags)) {
3789		/* LE tx timeout must be longer than the maximum
3790 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003791 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003792 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003793 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003794 }
3795
3796 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003797 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003798 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003799 u32 priority = (skb_peek(&chan->data_q))->priority;
3800 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003801 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003802 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003803
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003804 /* Stop if priority has changed */
3805 if (skb->priority < priority)
3806 break;
3807
3808 skb = skb_dequeue(&chan->data_q);
3809
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003810 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003811 hdev->le_last_tx = jiffies;
3812
3813 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003814 chan->sent++;
3815 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003816 }
3817 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003818
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003819 if (hdev->le_pkts)
3820 hdev->le_cnt = cnt;
3821 else
3822 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003823
3824 if (cnt != tmp)
3825 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003826}
3827
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003828static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003830 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 struct sk_buff *skb;
3832
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003833 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003834 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835
Marcel Holtmann52de5992013-09-03 18:08:38 -07003836 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3837 /* Schedule queues and send stuff to HCI driver */
3838 hci_sched_acl(hdev);
3839 hci_sched_sco(hdev);
3840 hci_sched_esco(hdev);
3841 hci_sched_le(hdev);
3842 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003843
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 /* Send next queued raw (unknown type) packet */
3845 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003846 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847}
3848
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003849/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850
3851/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003852static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853{
3854 struct hci_acl_hdr *hdr = (void *) skb->data;
3855 struct hci_conn *conn;
3856 __u16 handle, flags;
3857
3858 skb_pull(skb, HCI_ACL_HDR_SIZE);
3859
3860 handle = __le16_to_cpu(hdr->handle);
3861 flags = hci_flags(handle);
3862 handle = hci_handle(handle);
3863
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003864 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003865 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866
3867 hdev->stat.acl_rx++;
3868
3869 hci_dev_lock(hdev);
3870 conn = hci_conn_hash_lookup_handle(hdev, handle);
3871 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003872
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003874 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003875
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003877 l2cap_recv_acldata(conn, skb, flags);
3878 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003880 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003881 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 }
3883
3884 kfree_skb(skb);
3885}
3886
3887/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003888static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889{
3890 struct hci_sco_hdr *hdr = (void *) skb->data;
3891 struct hci_conn *conn;
3892 __u16 handle;
3893
3894 skb_pull(skb, HCI_SCO_HDR_SIZE);
3895
3896 handle = __le16_to_cpu(hdr->handle);
3897
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003898 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899
3900 hdev->stat.sco_rx++;
3901
3902 hci_dev_lock(hdev);
3903 conn = hci_conn_hash_lookup_handle(hdev, handle);
3904 hci_dev_unlock(hdev);
3905
3906 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003908 sco_recv_scodata(conn, skb);
3909 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003911 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003912 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913 }
3914
3915 kfree_skb(skb);
3916}
3917
Johan Hedberg9238f362013-03-05 20:37:48 +02003918static bool hci_req_is_complete(struct hci_dev *hdev)
3919{
3920 struct sk_buff *skb;
3921
3922 skb = skb_peek(&hdev->cmd_q);
3923 if (!skb)
3924 return true;
3925
3926 return bt_cb(skb)->req.start;
3927}
3928
Johan Hedberg42c6b122013-03-05 20:37:49 +02003929static void hci_resend_last(struct hci_dev *hdev)
3930{
3931 struct hci_command_hdr *sent;
3932 struct sk_buff *skb;
3933 u16 opcode;
3934
3935 if (!hdev->sent_cmd)
3936 return;
3937
3938 sent = (void *) hdev->sent_cmd->data;
3939 opcode = __le16_to_cpu(sent->opcode);
3940 if (opcode == HCI_OP_RESET)
3941 return;
3942
3943 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3944 if (!skb)
3945 return;
3946
3947 skb_queue_head(&hdev->cmd_q, skb);
3948 queue_work(hdev->workqueue, &hdev->cmd_work);
3949}
3950
Johan Hedberg9238f362013-03-05 20:37:48 +02003951void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3952{
3953 hci_req_complete_t req_complete = NULL;
3954 struct sk_buff *skb;
3955 unsigned long flags;
3956
3957 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3958
Johan Hedberg42c6b122013-03-05 20:37:49 +02003959 /* If the completed command doesn't match the last one that was
3960	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003961 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003962 if (!hci_sent_cmd_data(hdev, opcode)) {
3963 /* Some CSR based controllers generate a spontaneous
3964 * reset complete event during init and any pending
3965 * command will never be completed. In such a case we
3966 * need to resend whatever was the last sent
3967 * command.
3968 */
3969 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3970 hci_resend_last(hdev);
3971
Johan Hedberg9238f362013-03-05 20:37:48 +02003972 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003973 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003974
3975	/* If the command succeeded and there are still more commands in
3976 * this request the request is not yet complete.
3977 */
3978 if (!status && !hci_req_is_complete(hdev))
3979 return;
3980
3981 /* If this was the last command in a request the complete
3982 * callback would be found in hdev->sent_cmd instead of the
3983 * command queue (hdev->cmd_q).
3984 */
3985 if (hdev->sent_cmd) {
3986 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003987
3988 if (req_complete) {
3989 /* We must set the complete callback to NULL to
3990 * avoid calling the callback more than once if
3991 * this function gets called again.
3992 */
3993 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3994
Johan Hedberg9238f362013-03-05 20:37:48 +02003995 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003996 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003997 }
3998
3999 /* Remove all pending commands belonging to this request */
4000 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4001 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4002 if (bt_cb(skb)->req.start) {
4003 __skb_queue_head(&hdev->cmd_q, skb);
4004 break;
4005 }
4006
4007 req_complete = bt_cb(skb)->req.complete;
4008 kfree_skb(skb);
4009 }
4010 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4011
4012call_complete:
4013 if (req_complete)
4014 req_complete(hdev, status);
4015}
4016
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004017static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004019 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020 struct sk_buff *skb;
4021
4022 BT_DBG("%s", hdev->name);
4023
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004025 /* Send copy to monitor */
4026 hci_send_to_monitor(hdev, skb);
4027
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 if (atomic_read(&hdev->promisc)) {
4029 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004030 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031 }
4032
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004033 if (test_bit(HCI_RAW, &hdev->flags) ||
4034 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035 kfree_skb(skb);
4036 continue;
4037 }
4038
4039 if (test_bit(HCI_INIT, &hdev->flags)) {
4040			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004041 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 case HCI_ACLDATA_PKT:
4043 case HCI_SCODATA_PKT:
4044 kfree_skb(skb);
4045 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004046 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 }
4048
4049 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004050 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004052 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053 hci_event_packet(hdev, skb);
4054 break;
4055
4056 case HCI_ACLDATA_PKT:
4057 BT_DBG("%s ACL data packet", hdev->name);
4058 hci_acldata_packet(hdev, skb);
4059 break;
4060
4061 case HCI_SCODATA_PKT:
4062 BT_DBG("%s SCO data packet", hdev->name);
4063 hci_scodata_packet(hdev, skb);
4064 break;
4065
4066 default:
4067 kfree_skb(skb);
4068 break;
4069 }
4070 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071}
4072
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004073static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004075 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 struct sk_buff *skb;
4077
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004078 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4079 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004082 if (atomic_read(&hdev->cmd_cnt)) {
4083 skb = skb_dequeue(&hdev->cmd_q);
4084 if (!skb)
4085 return;
4086
Wei Yongjun7585b972009-02-25 18:29:52 +08004087 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004089 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004090 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004092 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004093 if (test_bit(HCI_RESET, &hdev->flags))
4094 del_timer(&hdev->cmd_timer);
4095 else
4096 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004097 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098 } else {
4099 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004100 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 }
4102 }
4103}