/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

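/* The read-only entries below use the seq_file interface: each *_show()
 * callback dumps the current state under hci_dev_lock() and is wired up
 * through single_open() in the matching *_open() helper. Simple numeric
 * attributes further down use DEFINE_SIMPLE_ATTRIBUTE() instead, which
 * generates the file operations from a get/set callback pair.
 */
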
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 *val = uuid->uuid;
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		/* The UUID bytes are stored little endian, so decode
		 * the fields from the byte array itself (not from the
		 * enclosing struct bt_uuid) and print them in the
		 * canonical big endian string order.
		 */
		data5 = get_unaligned_le32(val);
		data4 = get_unaligned_le16(val + 4);
		data3 = get_unaligned_le16(val + 6);
		data2 = get_unaligned_le16(val + 8);
		data1 = get_unaligned_le16(val + 10);
		data0 = get_unaligned_le32(val + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

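/* DEFINE_SIMPLE_ATTRIBUTE() builds the file operations from the get/set
 * pair and the printf format used when reading the value back. Passing
 * NULL for the set callback, as above, makes the attribute read-only.
 */
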
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

/* Unlike the setters above, changing the SSP debug mode requires talking
 * to the controller: the new mode is sent synchronously as an HCI command
 * and the cached value is only updated when the command succeeds.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

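/* Both sniff interval setters validate against the other bound, so the
 * two values can never cross: raising the minimum above the current
 * maximum requires raising the maximum first. Intervals must be even,
 * since the sniff parameters are expressed in baseband slots of 0.625 ms.
 */
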
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

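/* Synchronous requests use a small state machine on hdev: req_status
 * moves from HCI_REQ_PEND to either HCI_REQ_DONE (set from the event
 * path) or HCI_REQ_CANCELED, and the waiter sleeping on req_wait_q is
 * woken up to collect req_result.
 */
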
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

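/* Consume the last received event (hdev->recv_evt) and hand it to the
 * caller if it matches the requested event code, or, when event is 0,
 * if it is the Command Complete event for the given opcode. Anything
 * else is dropped and reported as -ENODATA.
 */
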
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

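/* Illustrative use (a sketch, not called from this file): a caller that
 * needs the response to a single command can do, for example:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 *
 * The returned skb holds the Command Complete parameters and must be
 * freed by the caller.
 */
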
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

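/* Pick the inquiry mode to program into the controller: 0x02 selects
 * extended inquiry results, 0x01 inquiry results with RSSI and 0x00 the
 * standard format. The manufacturer/revision checks below cover
 * controllers that handle inquiry with RSSI even though they do not
 * advertise the corresponding feature bits.
 */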
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by
		 * default use that one. If this is a LE only
		 * controller without one, default to the random
		 * address.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
		else
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

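/* Controller bring-up runs in four stages: init1 resets the controller
 * and reads the basic identity (features, version, address), init2 does
 * the BR/EDR and/or LE transport setup plus the event mask, init3 covers
 * link policy, the LE address type and extended feature pages, and init4
 * handles page 2 of the event mask and Synchronization Train support.
 * AMP controllers stop after the first stage.
 */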
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

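/* Discovered devices live in hdev->discovery: the "all" list holds every
 * cache entry, "unknown" tracks entries whose remote name has not been
 * obtained yet, and "resolve" queues entries that are waiting for (or
 * undergoing) remote name resolution.
 */
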
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

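/* Keep the resolve list ordered by signal strength so that remote names
 * are resolved for the strongest (typically closest) devices first.
 * Entries whose resolution is already pending keep their position ahead
 * of the entry being (re)inserted.
 */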
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

Johan Hedberg31754052012-01-04 13:39:52 +02001469bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001470 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
Johan Hedberg30883512012-01-04 14:16:21 +02001472 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001473 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001475 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
Szymon Janc2b2fec42012-11-20 11:38:54 +01001477 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1478
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001479 if (ssp)
1480 *ssp = data->ssp_mode;
1481
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001482 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001483 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001484 if (ie->data.ssp_mode && ssp)
1485 *ssp = true;
1486
Johan Hedberga3d4e202012-01-09 00:53:02 +02001487 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001488 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001489 ie->data.rssi = data->rssi;
1490 hci_inquiry_cache_update_resolve(hdev, ie);
1491 }
1492
Johan Hedberg561aafb2012-01-04 13:31:59 +02001493 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001494 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001495
Johan Hedberg561aafb2012-01-04 13:31:59 +02001496 /* Entry not in the cache. Add new one. */
1497 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1498 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001499 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001500
1501 list_add(&ie->all, &cache->all);
1502
1503 if (name_known) {
1504 ie->name_state = NAME_KNOWN;
1505 } else {
1506 ie->name_state = NAME_NOT_KNOWN;
1507 list_add(&ie->list, &cache->unknown);
1508 }
1509
1510update:
1511 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001512 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001513 ie->name_state = NAME_KNOWN;
1514 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 }
1516
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001517 memcpy(&ie->data, data, sizeof(*data));
1518 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001520
1521 if (ie->name_state == NAME_NOT_KNOWN)
1522 return false;
1523
1524 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525}
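/* Usage sketch: roughly how an inquiry result event handler is expected
 * to feed entries into this cache. example_inquiry_result() is
 * hypothetical and simplified; the field copies mirror struct
 * inquiry_data, and name_known is false because a plain inquiry result
 * carries no remote name.
 */
#if 0
static void example_inquiry_result(struct hci_dev *hdev,
				   struct inquiry_info *info)
{
	struct inquiry_data data;
	bool ssp;

	bacpy(&data.bdaddr, &info->bdaddr);
	data.pscan_rep_mode = info->pscan_rep_mode;
	data.pscan_period_mode = info->pscan_period_mode;
	data.pscan_mode = info->pscan_mode;
	memcpy(data.dev_class, info->dev_class, 3);
	data.clock_offset = info->clock_offset;
	data.rssi = 0;
	data.ssp_mode = 0x00;

	hci_dev_lock(hdev);
	/* Returns true when no name resolution is needed for the entry */
	hci_inquiry_cache_update(hdev, &data, false, &ssp);
	hci_dev_unlock(hdev);
}
#endif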
1526
1527static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1528{
Johan Hedberg30883512012-01-04 14:16:21 +02001529 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 struct inquiry_info *info = (struct inquiry_info *) buf;
1531 struct inquiry_entry *e;
1532 int copied = 0;
1533
Johan Hedberg561aafb2012-01-04 13:31:59 +02001534 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001536
1537 if (copied >= num)
1538 break;
1539
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 bacpy(&info->bdaddr, &data->bdaddr);
1541 info->pscan_rep_mode = data->pscan_rep_mode;
1542 info->pscan_period_mode = data->pscan_period_mode;
1543 info->pscan_mode = data->pscan_mode;
1544 memcpy(info->dev_class, data->dev_class, 3);
1545 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001546
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001548 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 }
1550
1551 BT_DBG("cache %p, copied %d", cache, copied);
1552 return copied;
1553}
1554
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556{
1557 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001558 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 struct hci_cp_inquiry cp;
1560
1561 BT_DBG("%s", hdev->name);
1562
1563 if (test_bit(HCI_INQUIRY, &hdev->flags))
1564 return;
1565
1566 /* Start Inquiry */
1567 memcpy(&cp.lap, &ir->lap, 3);
1568 cp.length = ir->length;
1569 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001570 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571}
1572
Andre Guedes3e13fa12013-03-27 20:04:56 -03001573static int wait_inquiry(void *word)
1574{
1575 schedule();
1576 return signal_pending(current);
1577}
1578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579int hci_inquiry(void __user *arg)
1580{
1581 __u8 __user *ptr = arg;
1582 struct hci_inquiry_req ir;
1583 struct hci_dev *hdev;
1584 int err = 0, do_inquiry = 0, max_rsp;
1585 long timeo;
1586 __u8 *buf;
1587
1588 if (copy_from_user(&ir, ptr, sizeof(ir)))
1589 return -EFAULT;
1590
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001591 hdev = hci_dev_get(ir.dev_id);
1592 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 return -ENODEV;
1594
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001595 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1596 err = -EBUSY;
1597 goto done;
1598 }
1599
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001600 if (hdev->dev_type != HCI_BREDR) {
1601 err = -EOPNOTSUPP;
1602 goto done;
1603 }
1604
Johan Hedberg56f87902013-10-02 13:43:13 +03001605 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1606 err = -EOPNOTSUPP;
1607 goto done;
1608 }
1609
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001610 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001611 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001612 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001613 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 do_inquiry = 1;
1615 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001616 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617
Marcel Holtmann04837f62006-07-03 10:02:33 +02001618 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001619
1620 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001621 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1622 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001623 if (err < 0)
1624 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001625
1626 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1627 * cleared). If it is interrupted by a signal, return -EINTR.
1628 */
1629 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1630 TASK_INTERRUPTIBLE))
1631 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001632 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001634	/* For an unlimited number of responses, use a buffer with
1635	 * 255 entries.
1636	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1638
1639	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
1640	 * and then copy it to user space.
1641	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001642 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001643 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 err = -ENOMEM;
1645 goto done;
1646 }
1647
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001648 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001650 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
1652 BT_DBG("num_rsp %d", ir.num_rsp);
1653
1654 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1655 ptr += sizeof(ir);
1656 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001657 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001659	} else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660		err = -EFAULT;
	}
1661
1662 kfree(buf);
1663
1664done:
1665 hci_dev_put(hdev);
1666 return err;
1667}
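/* Userspace sketch: issuing the HCIINQUIRY ioctl handled by
 * hci_inquiry() above. Assumes ctl_sk is a raw AF_BLUETOOTH/BTPROTO_HCI
 * socket; example_inquiry() and its parameter choices are illustrative,
 * with error handling trimmed.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int ctl_sk, int dev_id)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} *buf;
	int err;

	buf = calloc(1, sizeof(*buf));
	if (!buf)
		return -1;

	buf->ir.dev_id  = dev_id;
	buf->ir.flags   = IREQ_CACHE_FLUSH;	/* force a fresh inquiry */
	buf->ir.lap[0]  = 0x33;			/* GIAC 0x9e8b33 */
	buf->ir.lap[1]  = 0x8b;
	buf->ir.lap[2]  = 0x9e;
	buf->ir.length  = 8;			/* 8 * 1.28 seconds */
	buf->ir.num_rsp = 0;			/* 0 means up to 255 */

	err = ioctl(ctl_sk, HCIINQUIRY, buf);
	free(buf);
	return err;
}
#endif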
1668
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001669static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 int ret = 0;
1672
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 BT_DBG("%s %p", hdev->name, hdev);
1674
1675 hci_req_lock(hdev);
1676
Johan Hovold94324962012-03-15 14:48:41 +01001677 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1678 ret = -ENODEV;
1679 goto done;
1680 }
1681
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001682 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1683 /* Check for rfkill but allow the HCI setup stage to
1684 * proceed (which in itself doesn't cause any RF activity).
1685 */
1686 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1687 ret = -ERFKILL;
1688 goto done;
1689 }
1690
1691 /* Check for valid public address or a configured static
1692	 * random address, but let the HCI setup proceed to
1693 * be able to determine if there is a public address
1694 * or not.
1695 *
1696 * This check is only valid for BR/EDR controllers
1697 * since AMP controllers do not have an address.
1698 */
1699 if (hdev->dev_type == HCI_BREDR &&
1700 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1701 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1702 ret = -EADDRNOTAVAIL;
1703 goto done;
1704 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001705 }
1706
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 if (test_bit(HCI_UP, &hdev->flags)) {
1708 ret = -EALREADY;
1709 goto done;
1710 }
1711
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 if (hdev->open(hdev)) {
1713 ret = -EIO;
1714 goto done;
1715 }
1716
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001717 atomic_set(&hdev->cmd_cnt, 1);
1718 set_bit(HCI_INIT, &hdev->flags);
1719
1720 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1721 ret = hdev->setup(hdev);
1722
1723 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001724 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1725 set_bit(HCI_RAW, &hdev->flags);
1726
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001727 if (!test_bit(HCI_RAW, &hdev->flags) &&
1728 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001729 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 }
1731
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001732 clear_bit(HCI_INIT, &hdev->flags);
1733
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 if (!ret) {
1735 hci_dev_hold(hdev);
1736 set_bit(HCI_UP, &hdev->flags);
1737 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001738 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001739 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001740 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001741 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001742 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001743 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001744 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001745 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001747 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001748 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001749 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
1751 skb_queue_purge(&hdev->cmd_q);
1752 skb_queue_purge(&hdev->rx_q);
1753
1754 if (hdev->flush)
1755 hdev->flush(hdev);
1756
1757 if (hdev->sent_cmd) {
1758 kfree_skb(hdev->sent_cmd);
1759 hdev->sent_cmd = NULL;
1760 }
1761
1762 hdev->close(hdev);
1763 hdev->flags = 0;
1764 }
1765
1766done:
1767 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 return ret;
1769}
1770
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001771/* ---- HCI ioctl helpers ---- */
1772
1773int hci_dev_open(__u16 dev)
1774{
1775 struct hci_dev *hdev;
1776 int err;
1777
1778 hdev = hci_dev_get(dev);
1779 if (!hdev)
1780 return -ENODEV;
1781
Johan Hedberge1d08f42013-10-01 22:44:50 +03001782 /* We need to ensure that no other power on/off work is pending
1783 * before proceeding to call hci_dev_do_open. This is
1784 * particularly important if the setup procedure has not yet
1785 * completed.
1786 */
1787 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1788 cancel_delayed_work(&hdev->power_off);
1789
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001790 /* After this call it is guaranteed that the setup procedure
1791 * has finished. This means that error conditions like RFKILL
1792 * or no valid public or static random address apply.
1793 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001794 flush_workqueue(hdev->req_workqueue);
1795
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001796 err = hci_dev_do_open(hdev);
1797
1798 hci_dev_put(hdev);
1799
1800 return err;
1801}
1802
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803static int hci_dev_do_close(struct hci_dev *hdev)
1804{
1805 BT_DBG("%s %p", hdev->name, hdev);
1806
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001807 cancel_delayed_work(&hdev->power_off);
1808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 hci_req_cancel(hdev, ENODEV);
1810 hci_req_lock(hdev);
1811
1812 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001813 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 hci_req_unlock(hdev);
1815 return 0;
1816 }
1817
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001818 /* Flush RX and TX works */
1819 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001820 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001822 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001823 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001824 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001825 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001826 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001827 }
1828
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001829 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001830 cancel_delayed_work(&hdev->service_cache);
1831
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001832 cancel_delayed_work_sync(&hdev->le_scan_disable);
1833
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001834 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001835 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001837 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
1839 hci_notify(hdev, HCI_DEV_DOWN);
1840
1841 if (hdev->flush)
1842 hdev->flush(hdev);
1843
1844 /* Reset device */
1845 skb_queue_purge(&hdev->cmd_q);
1846 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001847 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001848 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001849 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001851 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 clear_bit(HCI_INIT, &hdev->flags);
1853 }
1854
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001855 /* flush cmd work */
1856 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
1858 /* Drop queues */
1859 skb_queue_purge(&hdev->rx_q);
1860 skb_queue_purge(&hdev->cmd_q);
1861 skb_queue_purge(&hdev->raw_q);
1862
1863 /* Drop last sent command */
1864 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001865 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 kfree_skb(hdev->sent_cmd);
1867 hdev->sent_cmd = NULL;
1868 }
1869
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001870 kfree_skb(hdev->recv_evt);
1871 hdev->recv_evt = NULL;
1872
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 /* After this point our queues are empty
1874	 * and no tasks are scheduled.
	 */
1875 hdev->close(hdev);
1876
Johan Hedberg35b973c2013-03-15 17:06:59 -05001877 /* Clear flags */
1878 hdev->flags = 0;
1879 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1880
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001881 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1882 if (hdev->dev_type == HCI_BREDR) {
1883 hci_dev_lock(hdev);
1884 mgmt_powered(hdev, 0);
1885 hci_dev_unlock(hdev);
1886 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001887 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001888
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001889 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001890 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001891
Johan Hedberge59fda82012-02-22 18:11:53 +02001892 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001893 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 hci_req_unlock(hdev);
1896
1897 hci_dev_put(hdev);
1898 return 0;
1899}
1900
1901int hci_dev_close(__u16 dev)
1902{
1903 struct hci_dev *hdev;
1904 int err;
1905
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001906 hdev = hci_dev_get(dev);
1907 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001909
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001910 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1911 err = -EBUSY;
1912 goto done;
1913 }
1914
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001915 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1916 cancel_delayed_work(&hdev->power_off);
1917
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001919
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001920done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 hci_dev_put(hdev);
1922 return err;
1923}
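/* Userspace sketch: hci_dev_open() and hci_dev_close() sit behind the
 * HCIDEVUP/HCIDEVDOWN ioctls on a raw HCI socket; CAP_NET_ADMIN is
 * required. example_cycle_dev() is illustrative.
 */
#if 0
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_cycle_dev(int dev_id)
{
	int err = -1;
	int sk = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);

	if (sk < 0)
		return -1;

	if (ioctl(sk, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
		goto done;

	err = ioctl(sk, HCIDEVDOWN, dev_id);
done:
	close(sk);
	return err;
}
#endif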
1924
1925int hci_dev_reset(__u16 dev)
1926{
1927 struct hci_dev *hdev;
1928 int ret = 0;
1929
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001930 hdev = hci_dev_get(dev);
1931 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 return -ENODEV;
1933
1934 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
Marcel Holtmann808a0492013-08-26 20:57:58 -07001936 if (!test_bit(HCI_UP, &hdev->flags)) {
1937 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001939 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001941 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1942 ret = -EBUSY;
1943 goto done;
1944 }
1945
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 /* Drop queues */
1947 skb_queue_purge(&hdev->rx_q);
1948 skb_queue_purge(&hdev->cmd_q);
1949
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001950 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001951 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001953 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954
1955 if (hdev->flush)
1956 hdev->flush(hdev);
1957
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001958 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001959	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001962 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 hci_req_unlock(hdev);
1966 hci_dev_put(hdev);
1967 return ret;
1968}
1969
1970int hci_dev_reset_stat(__u16 dev)
1971{
1972 struct hci_dev *hdev;
1973 int ret = 0;
1974
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001975 hdev = hci_dev_get(dev);
1976 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 return -ENODEV;
1978
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001979 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1980 ret = -EBUSY;
1981 goto done;
1982 }
1983
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1985
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001986done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 return ret;
1989}
1990
1991int hci_dev_cmd(unsigned int cmd, void __user *arg)
1992{
1993 struct hci_dev *hdev;
1994 struct hci_dev_req dr;
1995 int err = 0;
1996
1997 if (copy_from_user(&dr, arg, sizeof(dr)))
1998 return -EFAULT;
1999
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002000 hdev = hci_dev_get(dr.dev_id);
2001 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 return -ENODEV;
2003
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002004 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2005 err = -EBUSY;
2006 goto done;
2007 }
2008
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002009 if (hdev->dev_type != HCI_BREDR) {
2010 err = -EOPNOTSUPP;
2011 goto done;
2012 }
2013
Johan Hedberg56f87902013-10-02 13:43:13 +03002014 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2015 err = -EOPNOTSUPP;
2016 goto done;
2017 }
2018
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 switch (cmd) {
2020 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002021 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2022 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 break;
2024
2025 case HCISETENCRYPT:
2026 if (!lmp_encrypt_capable(hdev)) {
2027 err = -EOPNOTSUPP;
2028 break;
2029 }
2030
2031 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2032 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002033 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2034 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 if (err)
2036 break;
2037 }
2038
Johan Hedberg01178cd2013-03-05 20:37:41 +02002039 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2040 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 break;
2042
2043 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002044 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2045 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 break;
2047
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002048 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002049 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2050 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002051 break;
2052
2053 case HCISETLINKMODE:
2054 hdev->link_mode = ((__u16) dr.dev_opt) &
2055 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2056 break;
2057
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 case HCISETPTYPE:
2059 hdev->pkt_type = (__u16) dr.dev_opt;
2060 break;
2061
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002063 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2064 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 break;
2066
2067 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002068 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2069 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 break;
2071
2072 default:
2073 err = -EINVAL;
2074 break;
2075 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002076
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002077done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 hci_dev_put(hdev);
2079 return err;
2080}
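/* Userspace sketch: the HCISETSCAN case above is what a tool like
 * "hciconfig hci0 piscan" ends up exercising. Headers as in the
 * HCIINQUIRY sketch; example_enable_scans() is illustrative.
 */
#if 0
static int example_enable_scans(int ctl_sk, int dev_id)
{
	struct hci_dev_req dr;

	dr.dev_id  = dev_id;
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	/* connectable + discoverable */

	return ioctl(ctl_sk, HCISETSCAN, &dr);
}
#endif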
2081
2082int hci_get_dev_list(void __user *arg)
2083{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002084 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 struct hci_dev_list_req *dl;
2086 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 int n = 0, size, err;
2088 __u16 dev_num;
2089
2090 if (get_user(dev_num, (__u16 __user *) arg))
2091 return -EFAULT;
2092
2093 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2094 return -EINVAL;
2095
2096 size = sizeof(*dl) + dev_num * sizeof(*dr);
2097
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002098 dl = kzalloc(size, GFP_KERNEL);
2099 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 return -ENOMEM;
2101
2102 dr = dl->dev_req;
2103
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002104 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002105 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002106 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002107 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002108
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002109 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2110 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002111
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 (dr + n)->dev_id = hdev->id;
2113 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002114
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 if (++n >= dev_num)
2116 break;
2117 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002118 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
2120 dl->dev_num = n;
2121 size = sizeof(*dl) + n * sizeof(*dr);
2122
2123 err = copy_to_user(arg, dl, size);
2124 kfree(dl);
2125
2126 return err ? -EFAULT : 0;
2127}
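/* Userspace sketch: the matching HCIGETDEVLIST call. HCI_MAX_DEV keeps
 * the request well under the (PAGE_SIZE * 2) bound checked above;
 * headers as in the HCIINQUIRY sketch plus <stdio.h> and <string.h>.
 */
#if 0
static int example_list_devs(int ctl_sk)
{
	struct {
		struct hci_dev_list_req dl;
		struct hci_dev_req dr[HCI_MAX_DEV];
	} req;
	int i;

	memset(&req, 0, sizeof(req));
	req.dl.dev_num = HCI_MAX_DEV;

	if (ioctl(ctl_sk, HCIGETDEVLIST, &req) < 0)
		return -1;

	for (i = 0; i < req.dl.dev_num; i++)
		printf("hci%u flags 0x%x\n", req.dl.dev_req[i].dev_id,
		       req.dl.dev_req[i].dev_opt);

	return 0;
}
#endif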
2128
2129int hci_get_dev_info(void __user *arg)
2130{
2131 struct hci_dev *hdev;
2132 struct hci_dev_info di;
2133 int err = 0;
2134
2135 if (copy_from_user(&di, arg, sizeof(di)))
2136 return -EFAULT;
2137
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002138 hdev = hci_dev_get(di.dev_id);
2139 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 return -ENODEV;
2141
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002142 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002143 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002144
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002145 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2146 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 strcpy(di.name, hdev->name);
2149 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002150 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 di.flags = hdev->flags;
2152 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002153 if (lmp_bredr_capable(hdev)) {
2154 di.acl_mtu = hdev->acl_mtu;
2155 di.acl_pkts = hdev->acl_pkts;
2156 di.sco_mtu = hdev->sco_mtu;
2157 di.sco_pkts = hdev->sco_pkts;
2158 } else {
2159 di.acl_mtu = hdev->le_mtu;
2160 di.acl_pkts = hdev->le_pkts;
2161 di.sco_mtu = 0;
2162 di.sco_pkts = 0;
2163 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 di.link_policy = hdev->link_policy;
2165 di.link_mode = hdev->link_mode;
2166
2167 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2168 memcpy(&di.features, &hdev->features, sizeof(di.features));
2169
2170 if (copy_to_user(arg, &di, sizeof(di)))
2171 err = -EFAULT;
2172
2173 hci_dev_put(hdev);
2174
2175 return err;
2176}
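/* Userspace sketch: reading back the structure that hci_get_dev_info()
 * fills in. ba2str() comes from <bluetooth/hci_lib.h>; otherwise headers
 * as in the HCIINQUIRY sketch plus <stdio.h>.
 */
#if 0
static int example_print_dev_info(int ctl_sk, int dev_id)
{
	struct hci_dev_info di = { .dev_id = dev_id };
	char addr[18];

	if (ioctl(ctl_sk, HCIGETDEVINFO, &di) < 0)
		return -1;

	ba2str(&di.bdaddr, addr);
	printf("%s %s acl_mtu %d:%d sco_mtu %d:%d\n", di.name, addr,
	       di.acl_mtu, di.acl_pkts, di.sco_mtu, di.sco_pkts);

	return 0;
}
#endif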
2177
2178/* ---- Interface to HCI drivers ---- */
2179
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002180static int hci_rfkill_set_block(void *data, bool blocked)
2181{
2182 struct hci_dev *hdev = data;
2183
2184 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2185
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002186 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2187 return -EBUSY;
2188
Johan Hedberg5e130362013-09-13 08:58:17 +03002189 if (blocked) {
2190 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002191 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2192 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002193 } else {
2194 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002195 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002196
2197 return 0;
2198}
2199
2200static const struct rfkill_ops hci_rfkill_ops = {
2201 .set_block = hci_rfkill_set_block,
2202};
2203
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002204static void hci_power_on(struct work_struct *work)
2205{
2206 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002207 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002208
2209 BT_DBG("%s", hdev->name);
2210
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002211 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002212 if (err < 0) {
2213 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002214 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002215 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002216
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002217 /* During the HCI setup phase, a few error conditions are
2218 * ignored and they need to be checked now. If they are still
2219 * valid, it is important to turn the device back off.
2220 */
2221 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2222 (hdev->dev_type == HCI_BREDR &&
2223 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2224 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002225 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2226 hci_dev_do_close(hdev);
2227 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002228 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2229 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002230 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002231
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002232 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002233 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002234}
2235
2236static void hci_power_off(struct work_struct *work)
2237{
Johan Hedberg32435532011-11-07 22:16:04 +02002238 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002239 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002240
2241 BT_DBG("%s", hdev->name);
2242
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002243 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002244}
2245
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002246static void hci_discov_off(struct work_struct *work)
2247{
2248 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002249
2250 hdev = container_of(work, struct hci_dev, discov_off.work);
2251
2252 BT_DBG("%s", hdev->name);
2253
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002254 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002255}
2256
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002257int hci_uuids_clear(struct hci_dev *hdev)
2258{
Johan Hedberg48210022013-01-27 00:31:28 +02002259 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002260
Johan Hedberg48210022013-01-27 00:31:28 +02002261 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2262 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002263 kfree(uuid);
2264 }
2265
2266 return 0;
2267}
2268
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002269int hci_link_keys_clear(struct hci_dev *hdev)
2270{
2271 struct list_head *p, *n;
2272
2273 list_for_each_safe(p, n, &hdev->link_keys) {
2274 struct link_key *key;
2275
2276 key = list_entry(p, struct link_key, list);
2277
2278 list_del(p);
2279 kfree(key);
2280 }
2281
2282 return 0;
2283}
2284
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002285int hci_smp_ltks_clear(struct hci_dev *hdev)
2286{
2287 struct smp_ltk *k, *tmp;
2288
2289 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2290 list_del(&k->list);
2291 kfree(k);
2292 }
2293
2294 return 0;
2295}
2296
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002297struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2298{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002299 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002300
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002301 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002302 if (bacmp(bdaddr, &k->bdaddr) == 0)
2303 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002304
2305 return NULL;
2306}
2307
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302308static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002309 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002310{
2311 /* Legacy key */
2312 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302313 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002314
2315 /* Debug keys are insecure so don't store them persistently */
2316 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302317 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002318
2319 /* Changed combination key and there's no previous one */
2320 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302321 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002322
2323 /* Security mode 3 case */
2324 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302325 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002326
2327	/* Neither the local nor the remote side requested no-bonding */
2328 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302329 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002330
2331 /* Local side had dedicated bonding as requirement */
2332 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302333 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002334
2335 /* Remote side had dedicated bonding as requirement */
2336 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302337 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002338
2339 /* If none of the above criteria match, then don't store the key
2340 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302341 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002342}
2343
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002344struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002345{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002346 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002347
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002348 list_for_each_entry(k, &hdev->long_term_keys, list) {
2349 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002350 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002351 continue;
2352
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002353 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002354 }
2355
2356 return NULL;
2357}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002358
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002359struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002360 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002361{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002362 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002363
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002364 list_for_each_entry(k, &hdev->long_term_keys, list)
2365 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002366 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002367 return k;
2368
2369 return NULL;
2370}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002371
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002372int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002373 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002374{
2375 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302376 u8 old_key_type;
2377 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002378
2379 old_key = hci_find_link_key(hdev, bdaddr);
2380 if (old_key) {
2381 old_key_type = old_key->type;
2382 key = old_key;
2383 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002384 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002385 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2386 if (!key)
2387 return -ENOMEM;
2388 list_add(&key->list, &hdev->link_keys);
2389 }
2390
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002391 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002392
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002393 /* Some buggy controller combinations generate a changed
2394 * combination key for legacy pairing even when there's no
2395 * previous key */
2396 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002397 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002398 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002399 if (conn)
2400 conn->key_type = type;
2401 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002402
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002403 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002404 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002405 key->pin_len = pin_len;
2406
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002407 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002408 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002409 else
2410 key->type = type;
2411
Johan Hedberg4df378a2011-04-28 11:29:03 -07002412 if (!new_key)
2413 return 0;
2414
2415 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2416
Johan Hedberg744cf192011-11-08 20:40:14 +02002417 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002418
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302419 if (conn)
2420 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002421
2422 return 0;
2423}
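/* Usage sketch: roughly how a Link Key Notification event handler hands
 * keys to the store above. example_link_key_notify() is hypothetical and
 * simplified; new_key is 1 so userspace gets a mgmt event.
 */
#if 0
static void example_link_key_notify(struct hci_dev *hdev,
				    struct hci_ev_link_key_notify *ev)
{
	struct hci_conn *conn;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
			 ev->key_type, conn ? conn->pin_length : 0);

	hci_dev_unlock(hdev);
}
#endif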
2424
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002425int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002426 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002427 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002428{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002429 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002430
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002431 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2432 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002433
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002434 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2435	if (old_key) {
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002436		key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002437	} else {
2438 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002439 if (!key)
2440 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002441 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002442 }
2443
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002444 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002445 key->bdaddr_type = addr_type;
2446 memcpy(key->val, tk, sizeof(key->val));
2447 key->authenticated = authenticated;
2448 key->ediv = ediv;
2449 key->enc_size = enc_size;
2450 key->type = type;
2451 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002452
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002453 if (!new_key)
2454 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002455
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002456 if (type & HCI_SMP_LTK)
2457 mgmt_new_ltk(hdev, key, 1);
2458
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002459 return 0;
2460}
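/* Usage sketch: roughly how the SMP layer would store a freshly
 * distributed slave LTK. The authenticated flag and the 16 byte
 * encryption size are placeholders for values taken from the pairing
 * exchange.
 */
#if 0
static void example_store_slave_ltk(struct hci_conn *hcon, u8 tk[16],
				    __le16 ediv, u8 rand[8])
{
	hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
		    HCI_SMP_LTK_SLAVE, 1, 1, tk, 16, ediv, rand);
}
#endif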
2461
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002462int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2463{
2464 struct link_key *key;
2465
2466 key = hci_find_link_key(hdev, bdaddr);
2467 if (!key)
2468 return -ENOENT;
2469
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002470 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002471
2472 list_del(&key->list);
2473 kfree(key);
2474
2475 return 0;
2476}
2477
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002478int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2479{
2480 struct smp_ltk *k, *tmp;
2481
2482 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2483 if (bacmp(bdaddr, &k->bdaddr))
2484 continue;
2485
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002486 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002487
2488 list_del(&k->list);
2489 kfree(k);
2490 }
2491
2492 return 0;
2493}
2494
Ville Tervo6bd32322011-02-16 16:32:41 +02002495/* HCI command timeout handler */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002496static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002497{
2498 struct hci_dev *hdev = (void *) arg;
2499
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002500 if (hdev->sent_cmd) {
2501 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2502 u16 opcode = __le16_to_cpu(sent->opcode);
2503
2504 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2505 } else {
2506 BT_ERR("%s command tx timeout", hdev->name);
2507 }
2508
Ville Tervo6bd32322011-02-16 16:32:41 +02002509 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002510 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002511}
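/* Usage sketch: the command work handler is expected to arm this timer
 * whenever a command is handed to the driver, along the lines of:
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * so a controller that never answers trips the handler above.
 */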
2512
Szymon Janc2763eda2011-03-22 13:12:22 +01002513struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002514 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002515{
2516 struct oob_data *data;
2517
2518 list_for_each_entry(data, &hdev->remote_oob_data, list)
2519 if (bacmp(bdaddr, &data->bdaddr) == 0)
2520 return data;
2521
2522 return NULL;
2523}
2524
2525int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2526{
2527 struct oob_data *data;
2528
2529 data = hci_find_remote_oob_data(hdev, bdaddr);
2530 if (!data)
2531 return -ENOENT;
2532
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002533 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002534
2535 list_del(&data->list);
2536 kfree(data);
2537
2538 return 0;
2539}
2540
2541int hci_remote_oob_data_clear(struct hci_dev *hdev)
2542{
2543 struct oob_data *data, *n;
2544
2545 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2546 list_del(&data->list);
2547 kfree(data);
2548 }
2549
2550 return 0;
2551}
2552
2553int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002554 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002555{
2556 struct oob_data *data;
2557
2558 data = hci_find_remote_oob_data(hdev, bdaddr);
2559
2560 if (!data) {
2561 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2562 if (!data)
2563 return -ENOMEM;
2564
2565 bacpy(&data->bdaddr, bdaddr);
2566 list_add(&data->list, &hdev->remote_oob_data);
2567 }
2568
2569 memcpy(data->hash, hash, sizeof(data->hash));
2570 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2571
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002572 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002573
2574 return 0;
2575}
2576
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002577struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2578 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002579{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002580 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002581
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002582 list_for_each_entry(b, &hdev->blacklist, list) {
2583 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002584 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002585 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002586
2587 return NULL;
2588}
2589
2590int hci_blacklist_clear(struct hci_dev *hdev)
2591{
2592 struct list_head *p, *n;
2593
2594 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002595 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002596
2597 list_del(p);
2598 kfree(b);
2599 }
2600
2601 return 0;
2602}
2603
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002604int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002605{
2606 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002607
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002608 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002609 return -EBADF;
2610
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002611 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002612 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002613
2614 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002615 if (!entry)
2616 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002617
2618 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002619 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002620
2621 list_add(&entry->list, &hdev->blacklist);
2622
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002623 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002624}
2625
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002626int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002627{
2628 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002629
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002630 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002631 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002632
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002633 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002634 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002635 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002636
2637 list_del(&entry->list);
2638 kfree(entry);
2639
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002640 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002641}
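/* Usage sketch: mgmt's Block Device / Unblock Device handlers are the
 * main callers of the two functions above, roughly:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
 *	hci_dev_unlock(hdev);
 *
 * where cp stands in for the parsed mgmt command payload.
 */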
2642
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002643static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002644{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002645 if (status) {
2646 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002647
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002648 hci_dev_lock(hdev);
2649 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2650 hci_dev_unlock(hdev);
2651 return;
2652 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002653}
2654
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002655static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002656{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002657 /* General inquiry access code (GIAC) */
2658 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2659 struct hci_request req;
2660 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002661 int err;
2662
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002663 if (status) {
2664 BT_ERR("Failed to disable LE scanning: status %d", status);
2665 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002666 }
2667
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002668 switch (hdev->discovery.type) {
2669 case DISCOV_TYPE_LE:
2670 hci_dev_lock(hdev);
2671 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2672 hci_dev_unlock(hdev);
2673 break;
2674
2675 case DISCOV_TYPE_INTERLEAVED:
2676 hci_req_init(&req, hdev);
2677
2678 memset(&cp, 0, sizeof(cp));
2679 memcpy(&cp.lap, lap, sizeof(cp.lap));
2680 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2681 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2682
2683 hci_dev_lock(hdev);
2684
2685 hci_inquiry_cache_flush(hdev);
2686
2687 err = hci_req_run(&req, inquiry_complete);
2688 if (err) {
2689 BT_ERR("Inquiry request failed: err %d", err);
2690 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2691 }
2692
2693 hci_dev_unlock(hdev);
2694 break;
2695 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002696}
2697
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002698static void le_scan_disable_work(struct work_struct *work)
2699{
2700 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002701 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002702 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002703 struct hci_request req;
2704 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002705
2706 BT_DBG("%s", hdev->name);
2707
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002708 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002709
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002710 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002711 cp.enable = LE_SCAN_DISABLE;
2712 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002713
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002714 err = hci_req_run(&req, le_scan_disable_work_complete);
2715 if (err)
2716 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002717}
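/* Usage sketch: discovery code stops a timed LE scan by scheduling the
 * work above, roughly:
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *			   msecs_to_jiffies(DISCOV_LE_TIMEOUT));
 */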
2718
David Herrmann9be0dab2012-04-22 14:39:57 +02002719/* Alloc HCI device */
2720struct hci_dev *hci_alloc_dev(void)
2721{
2722 struct hci_dev *hdev;
2723
2724 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2725 if (!hdev)
2726 return NULL;
2727
David Herrmannb1b813d2012-04-22 14:39:58 +02002728 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2729 hdev->esco_type = (ESCO_HV1);
2730 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002731 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2732 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002733 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2734 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002735
David Herrmannb1b813d2012-04-22 14:39:58 +02002736 hdev->sniff_max_interval = 800;
2737 hdev->sniff_min_interval = 80;
2738
Marcel Holtmannbef64732013-10-11 08:23:19 -07002739 hdev->le_scan_interval = 0x0060;
2740 hdev->le_scan_window = 0x0030;
2741
David Herrmannb1b813d2012-04-22 14:39:58 +02002742 mutex_init(&hdev->lock);
2743 mutex_init(&hdev->req_lock);
2744
2745 INIT_LIST_HEAD(&hdev->mgmt_pending);
2746 INIT_LIST_HEAD(&hdev->blacklist);
2747 INIT_LIST_HEAD(&hdev->uuids);
2748 INIT_LIST_HEAD(&hdev->link_keys);
2749 INIT_LIST_HEAD(&hdev->long_term_keys);
2750 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002751 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002752
2753 INIT_WORK(&hdev->rx_work, hci_rx_work);
2754 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2755 INIT_WORK(&hdev->tx_work, hci_tx_work);
2756 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002757
David Herrmannb1b813d2012-04-22 14:39:58 +02002758 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2759 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2760 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2761
David Herrmannb1b813d2012-04-22 14:39:58 +02002762 skb_queue_head_init(&hdev->rx_q);
2763 skb_queue_head_init(&hdev->cmd_q);
2764 skb_queue_head_init(&hdev->raw_q);
2765
2766 init_waitqueue_head(&hdev->req_wait_q);
2767
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002768 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002769
David Herrmannb1b813d2012-04-22 14:39:58 +02002770 hci_init_sysfs(hdev);
2771 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002772
2773 return hdev;
2774}
2775EXPORT_SYMBOL(hci_alloc_dev);
2776
2777/* Free HCI device */
2778void hci_free_dev(struct hci_dev *hdev)
2779{
David Herrmann9be0dab2012-04-22 14:39:57 +02002780 /* will free via device release */
2781 put_device(&hdev->dev);
2782}
2783EXPORT_SYMBOL(hci_free_dev);
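/* Driver-side sketch: the minimal alloc/register/free sequence a
 * transport driver is expected to follow. example_probe(),
 * struct example_transport and the example_* callbacks are hypothetical.
 */
#if 0
static int example_probe(struct example_transport *xport)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->flush = example_flush;
	hdev->send  = example_send;
	hci_set_drvdata(hdev, xport);

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif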
2784
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785/* Register HCI device */
2786int hci_register_dev(struct hci_dev *hdev)
2787{
David Herrmannb1b813d2012-04-22 14:39:58 +02002788 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789
David Herrmann010666a2012-01-07 15:47:07 +01002790 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 return -EINVAL;
2792
Mat Martineau08add512011-11-02 16:18:36 -07002793 /* Do not allow HCI_AMP devices to register at index 0,
2794 * so the index can be used as the AMP controller ID.
2795 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002796 switch (hdev->dev_type) {
2797 case HCI_BREDR:
2798 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2799 break;
2800 case HCI_AMP:
2801 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2802 break;
2803 default:
2804 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002806
Sasha Levin3df92b32012-05-27 22:36:56 +02002807 if (id < 0)
2808 return id;
2809
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 sprintf(hdev->name, "hci%d", id);
2811 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002812
2813 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2814
Kees Cookd8537542013-07-03 15:04:57 -07002815 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2816 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002817 if (!hdev->workqueue) {
2818 error = -ENOMEM;
2819 goto err;
2820 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002821
Kees Cookd8537542013-07-03 15:04:57 -07002822 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2823 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002824 if (!hdev->req_workqueue) {
2825 destroy_workqueue(hdev->workqueue);
2826 error = -ENOMEM;
2827 goto err;
2828 }
2829
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002830 if (!IS_ERR_OR_NULL(bt_debugfs))
2831 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2832
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002833 dev_set_name(&hdev->dev, "%s", hdev->name);
2834
2835 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002836 if (error < 0)
2837 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002839 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002840 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2841 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002842 if (hdev->rfkill) {
2843 if (rfkill_register(hdev->rfkill) < 0) {
2844 rfkill_destroy(hdev->rfkill);
2845 hdev->rfkill = NULL;
2846 }
2847 }
2848
Johan Hedberg5e130362013-09-13 08:58:17 +03002849 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2850 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2851
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002852 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002853 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002854
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002855 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002856 /* Assume BR/EDR support until proven otherwise (such as
2857	 * through reading supported features during init).
2858 */
2859 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2860 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002861
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002862 write_lock(&hci_dev_list_lock);
2863 list_add(&hdev->list, &hci_dev_list);
2864 write_unlock(&hci_dev_list_lock);
2865
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002867 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868
Johan Hedberg19202572013-01-14 22:33:51 +02002869 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002870
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002872
David Herrmann33ca9542011-10-08 14:58:49 +02002873err_wqueue:
2874 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002875 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002876err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002877 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002878
David Herrmann33ca9542011-10-08 14:58:49 +02002879 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880}
2881EXPORT_SYMBOL(hci_register_dev);
2882
2883/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002884void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885{
Sasha Levin3df92b32012-05-27 22:36:56 +02002886 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002887
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002888 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889
Johan Hovold94324962012-03-15 14:48:41 +01002890 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2891
Sasha Levin3df92b32012-05-27 22:36:56 +02002892 id = hdev->id;
2893
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002894 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002896 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897
2898 hci_dev_do_close(hdev);
2899
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302900 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002901 kfree_skb(hdev->reassembly[i]);
2902
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002903 cancel_work_sync(&hdev->power_on);
2904
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002905 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002906 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002907 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002908 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002909 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002910 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002911
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002912 /* mgmt_index_removed should take care of emptying the
2913 * pending list */
2914 BUG_ON(!list_empty(&hdev->mgmt_pending));
2915
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 hci_notify(hdev, HCI_DEV_UNREG);
2917
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002918 if (hdev->rfkill) {
2919 rfkill_unregister(hdev->rfkill);
2920 rfkill_destroy(hdev->rfkill);
2921 }
2922
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002923 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002924
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002925 debugfs_remove_recursive(hdev->debugfs);
2926
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002927 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002928 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002929
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002930 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002931 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002932 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002933 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002934 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002935 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002936 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002937
David Herrmanndc946bd2012-01-07 15:47:24 +01002938 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002939
2940 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941}
2942EXPORT_SYMBOL(hci_unregister_dev);
2943
2944/* Suspend HCI device */
2945int hci_suspend_dev(struct hci_dev *hdev)
2946{
2947 hci_notify(hdev, HCI_DEV_SUSPEND);
2948 return 0;
2949}
2950EXPORT_SYMBOL(hci_suspend_dev);
2951
2952/* Resume HCI device */
2953int hci_resume_dev(struct hci_dev *hdev)
2954{
2955 hci_notify(hdev, HCI_DEV_RESUME);
2956 return 0;
2957}
2958EXPORT_SYMBOL(hci_resume_dev);
2959
Marcel Holtmann76bca882009-11-18 00:40:39 +01002960/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002961int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002962{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002963 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002964 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002965 kfree_skb(skb);
2966 return -ENXIO;
2967 }
2968
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002969 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002970 bt_cb(skb)->incoming = 1;
2971
2972 /* Time stamp */
2973 __net_timestamp(skb);
2974
Marcel Holtmann76bca882009-11-18 00:40:39 +01002975 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002976 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002977
Marcel Holtmann76bca882009-11-18 00:40:39 +01002978 return 0;
2979}
2980EXPORT_SYMBOL(hci_recv_frame);
2981
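/* Illustrative sketch (assumption, not taken from a real driver): a driver
 * holding one complete packet in buf/len hands it up like this:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *
 *	return hci_recv_frame(hdev, skb);
 */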
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302982static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002983 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302984{
2985 int len = 0;
2986 int hlen = 0;
2987 int remain = count;
2988 struct sk_buff *skb;
2989 struct bt_skb_cb *scb;
2990
2991 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002992 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302993 return -EILSEQ;
2994
2995 skb = hdev->reassembly[index];
2996
2997 if (!skb) {
2998 switch (type) {
2999 case HCI_ACLDATA_PKT:
3000 len = HCI_MAX_FRAME_SIZE;
3001 hlen = HCI_ACL_HDR_SIZE;
3002 break;
3003 case HCI_EVENT_PKT:
3004 len = HCI_MAX_EVENT_SIZE;
3005 hlen = HCI_EVENT_HDR_SIZE;
3006 break;
3007 case HCI_SCODATA_PKT:
3008 len = HCI_MAX_SCO_SIZE;
3009 hlen = HCI_SCO_HDR_SIZE;
3010 break;
3011 }
3012
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003013 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303014 if (!skb)
3015 return -ENOMEM;
3016
3017 scb = (void *) skb->cb;
3018 scb->expect = hlen;
3019 scb->pkt_type = type;
3020
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303021 hdev->reassembly[index] = skb;
3022 }
3023
3024 while (count) {
3025 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003026 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303027
3028 memcpy(skb_put(skb, len), data, len);
3029
3030 count -= len;
3031 data += len;
3032 scb->expect -= len;
3033 remain = count;
3034
3035 switch (type) {
3036 case HCI_EVENT_PKT:
3037 if (skb->len == HCI_EVENT_HDR_SIZE) {
3038 struct hci_event_hdr *h = hci_event_hdr(skb);
3039 scb->expect = h->plen;
3040
3041 if (skb_tailroom(skb) < scb->expect) {
3042 kfree_skb(skb);
3043 hdev->reassembly[index] = NULL;
3044 return -ENOMEM;
3045 }
3046 }
3047 break;
3048
3049 case HCI_ACLDATA_PKT:
3050 if (skb->len == HCI_ACL_HDR_SIZE) {
3051 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3052 scb->expect = __le16_to_cpu(h->dlen);
3053
3054 if (skb_tailroom(skb) < scb->expect) {
3055 kfree_skb(skb);
3056 hdev->reassembly[index] = NULL;
3057 return -ENOMEM;
3058 }
3059 }
3060 break;
3061
3062 case HCI_SCODATA_PKT:
3063 if (skb->len == HCI_SCO_HDR_SIZE) {
3064 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3065 scb->expect = h->dlen;
3066
3067 if (skb_tailroom(skb) < scb->expect) {
3068 kfree_skb(skb);
3069 hdev->reassembly[index] = NULL;
3070 return -ENOMEM;
3071 }
3072 }
3073 break;
3074 }
3075
3076 if (scb->expect == 0) {
3077 /* Complete frame */
3078
3079 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003080 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303081
3082 hdev->reassembly[index] = NULL;
3083 return remain;
3084 }
3085 }
3086
3087 return remain;
3088}
3089
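/* Summary of the helper above: hci_reassembly() returns the number of
 * input bytes it has not consumed yet, so callers loop until that count
 * reaches zero; a negative return reports an error (-EILSEQ for a bad
 * packet type or index, -ENOMEM when no reassembly buffer can be
 * allocated or the advertised payload exceeds the tailroom).
 */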
Marcel Holtmannef222012007-07-11 06:42:04 +02003090int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3091{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303092 int rem = 0;
3093
Marcel Holtmannef222012007-07-11 06:42:04 +02003094 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3095 return -EILSEQ;
3096
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003097 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003098 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303099 if (rem < 0)
3100 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003101
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303102 data += (count - rem);
3103 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003104 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003105
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303106 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003107}
3108EXPORT_SYMBOL(hci_recv_fragment);
3109
Suraj Sumangala99811512010-07-14 13:02:19 +05303110#define STREAM_REASSEMBLY 0
3111
3112int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3113{
3114 int type;
3115 int rem = 0;
3116
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003117 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303118 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3119
3120 if (!skb) {
3121 struct { char type; } *pkt;
3122
3123 /* Start of the frame */
3124 pkt = data;
3125 type = pkt->type;
3126
3127 data++;
3128 count--;
3129 } else
3130 type = bt_cb(skb)->pkt_type;
3131
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003132 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003133 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303134 if (rem < 0)
3135 return rem;
3136
3137 data += (count - rem);
3138 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003139 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303140
3141 return rem;
3142}
3143EXPORT_SYMBOL(hci_recv_stream_fragment);
3144
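/* Illustrative sketch (assumption): a UART-style driver with no packet
 * framing of its own can push raw RX bytes straight into the stream
 * reassembler, which recovers the packet type byte itself:
 *
 *	static void my_uart_rx(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *			BT_ERR("%s corrupted RX stream", hdev->name);
 *	}
 */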
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145/* ---- Interface to upper protocols ---- */
3146
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147int hci_register_cb(struct hci_cb *cb)
3148{
3149 BT_DBG("%p name %s", cb, cb->name);
3150
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003151 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003153 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154
3155 return 0;
3156}
3157EXPORT_SYMBOL(hci_register_cb);
3158
3159int hci_unregister_cb(struct hci_cb *cb)
3160{
3161 BT_DBG("%p name %s", cb, cb->name);
3162
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003163 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003165 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166
3167 return 0;
3168}
3169EXPORT_SYMBOL(hci_unregister_cb);
3170
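/* Illustrative sketch (assumption): an upper protocol registers a
 * statically allocated callback block; only .name is shown here, the
 * event callbacks are omitted:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */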
Marcel Holtmann51086992013-10-10 14:54:19 -07003171static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003173 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003175 /* Time stamp */
3176 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003177
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003178 /* Send copy to monitor */
3179 hci_send_to_monitor(hdev, skb);
3180
3181 if (atomic_read(&hdev->promisc)) {
3182 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003183 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 }
3185
3186	/* Get rid of skb owner prior to sending to the driver. */
3187 skb_orphan(skb);
3188
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003189 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003190 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191}
3192
Johan Hedberg3119ae92013-03-05 20:37:44 +02003193void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3194{
3195 skb_queue_head_init(&req->cmd_q);
3196 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003197 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003198}
3199
3200int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3201{
3202 struct hci_dev *hdev = req->hdev;
3203 struct sk_buff *skb;
3204 unsigned long flags;
3205
3206 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3207
Andre Guedes5d73e032013-03-08 11:20:16 -03003208	/* If an error occurred during request building, remove all HCI
3209 * commands queued on the HCI request queue.
3210 */
3211 if (req->err) {
3212 skb_queue_purge(&req->cmd_q);
3213 return req->err;
3214 }
3215
Johan Hedberg3119ae92013-03-05 20:37:44 +02003216 /* Do not allow empty requests */
3217 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003218 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003219
3220 skb = skb_peek_tail(&req->cmd_q);
3221 bt_cb(skb)->req.complete = complete;
3222
3223 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3224 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3225 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3226
3227 queue_work(hdev->workqueue, &hdev->cmd_work);
3228
3229 return 0;
3230}
3231
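/* Illustrative sketch (assumption): a typical caller batches commands
 * into one request and receives a single completion callback of type
 * hci_req_complete_t, here the hypothetical my_req_complete():
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_req_complete);
 */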
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003232static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003233 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234{
3235 int len = HCI_COMMAND_HDR_SIZE + plen;
3236 struct hci_command_hdr *hdr;
3237 struct sk_buff *skb;
3238
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003240 if (!skb)
3241 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242
3243 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003244 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245 hdr->plen = plen;
3246
3247 if (plen)
3248 memcpy(skb_put(skb, plen), param, plen);
3249
3250 BT_DBG("skb len %d", skb->len);
3251
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003252 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003253
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003254 return skb;
3255}
3256
3257/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003258int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3259 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003260{
3261 struct sk_buff *skb;
3262
3263 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3264
3265 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3266 if (!skb) {
3267 BT_ERR("%s no memory for command", hdev->name);
3268 return -ENOMEM;
3269 }
3270
Johan Hedberg11714b32013-03-05 20:37:47 +02003271	/* Stand-alone HCI commands must be flagged as
3272 * single-command requests.
3273 */
3274 bt_cb(skb)->req.start = true;
3275
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003277 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278
3279 return 0;
3280}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281
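/* Illustrative sketch (assumption): a stand-alone command without
 * parameters, such as a controller reset, takes a single call:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */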
Johan Hedberg71c76a12013-03-05 20:37:46 +02003282/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003283void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3284 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003285{
3286 struct hci_dev *hdev = req->hdev;
3287 struct sk_buff *skb;
3288
3289 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3290
Andre Guedes34739c12013-03-08 11:20:18 -03003291	/* If an error occurred during request building, there is no point in
3292 * queueing the HCI command. We can simply return.
3293 */
3294 if (req->err)
3295 return;
3296
Johan Hedberg71c76a12013-03-05 20:37:46 +02003297 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3298 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003299 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3300 hdev->name, opcode);
3301 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003302 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003303 }
3304
3305 if (skb_queue_empty(&req->cmd_q))
3306 bt_cb(skb)->req.start = true;
3307
Johan Hedberg02350a72013-04-03 21:50:29 +03003308 bt_cb(skb)->req.event = event;
3309
Johan Hedberg71c76a12013-03-05 20:37:46 +02003310 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003311}
3312
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003313void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3314 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003315{
3316 hci_req_add_ev(req, opcode, plen, param, 0);
3317}
3318
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003320void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321{
3322 struct hci_command_hdr *hdr;
3323
3324 if (!hdev->sent_cmd)
3325 return NULL;
3326
3327 hdr = (void *) hdev->sent_cmd->data;
3328
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003329 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 return NULL;
3331
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003332 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333
3334 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3335}
3336
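/* Illustrative sketch (assumption, mirrors the usual event-handler
 * pattern elsewhere in the stack):
 *
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	__u8 param;
 *
 *	if (!sent)
 *		return;
 *
 *	param = *((__u8 *) sent);
 */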
3337/* Send ACL data */
3338static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3339{
3340 struct hci_acl_hdr *hdr;
3341 int len = skb->len;
3342
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003343 skb_push(skb, HCI_ACL_HDR_SIZE);
3344 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003345 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003346 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3347 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348}
3349
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003350static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003351 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003353 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 struct hci_dev *hdev = conn->hdev;
3355 struct sk_buff *list;
3356
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003357 skb->len = skb_headlen(skb);
3358 skb->data_len = 0;
3359
3360 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003361
3362 switch (hdev->dev_type) {
3363 case HCI_BREDR:
3364 hci_add_acl_hdr(skb, conn->handle, flags);
3365 break;
3366 case HCI_AMP:
3367 hci_add_acl_hdr(skb, chan->handle, flags);
3368 break;
3369 default:
3370 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3371 return;
3372 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003373
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003374 list = skb_shinfo(skb)->frag_list;
3375 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376		/* Non-fragmented */
3377 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3378
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003379 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 } else {
3381 /* Fragmented */
3382 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3383
3384 skb_shinfo(skb)->frag_list = NULL;
3385
3386 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003387 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003389 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003390
3391 flags &= ~ACL_START;
3392 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 do {
3394 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003395
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003396 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003397 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
3399 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3400
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003401 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 } while (list);
3403
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003404 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003406}
3407
3408void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3409{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003410 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003411
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003412 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003413
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003414 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003416 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418
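/* Illustrative sketch (assumption): L2CAP hands a fully built frame to
 * the scheduler through a per-connection channel; ACL_START marks the
 * first fragment of a new L2CAP frame:
 *
 *	hci_send_acl(chan, skb, ACL_START);
 */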
3419/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003420void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421{
3422 struct hci_dev *hdev = conn->hdev;
3423 struct hci_sco_hdr hdr;
3424
3425 BT_DBG("%s len %d", hdev->name, skb->len);
3426
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003427 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428 hdr.dlen = skb->len;
3429
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003430 skb_push(skb, HCI_SCO_HDR_SIZE);
3431 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003432 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003434 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003435
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003437 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439
3440/* ---- HCI TX task (outgoing data) ---- */
3441
3442/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003443static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3444 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445{
3446 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003447 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003448 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003450	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003452
3453 rcu_read_lock();
3454
3455 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003456 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003458
3459 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3460 continue;
3461
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 num++;
3463
3464 if (c->sent < min) {
3465 min = c->sent;
3466 conn = c;
3467 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003468
3469 if (hci_conn_num(hdev, type) == num)
3470 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 }
3472
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003473 rcu_read_unlock();
3474
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003476 int cnt, q;
3477
3478 switch (conn->type) {
3479 case ACL_LINK:
3480 cnt = hdev->acl_cnt;
3481 break;
3482 case SCO_LINK:
3483 case ESCO_LINK:
3484 cnt = hdev->sco_cnt;
3485 break;
3486 case LE_LINK:
3487 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3488 break;
3489 default:
3490 cnt = 0;
3491 BT_ERR("Unknown link type");
3492 }
3493
3494 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 *quote = q ? q : 1;
3496 } else
3497 *quote = 0;
3498
3499 BT_DBG("conn %p quote %d", conn, *quote);
3500 return conn;
3501}
3502
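/* Worked example for the quota computed above (numbers are made up):
 * with cnt == 9 free ACL slots shared by num == 4 busy connections, the
 * least-used connection gets a quote of 9 / 4 == 2 packets per round; a
 * zero quotient is rounded up to 1 so the scheduler always makes
 * progress.
 */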
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003503static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504{
3505 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003506 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507
Ville Tervobae1f5d92011-02-10 22:38:53 -03003508 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003510 rcu_read_lock();
3511
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003513 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003514 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003515 BT_ERR("%s killing stalled connection %pMR",
3516 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003517 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518 }
3519 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003520
3521 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522}
3523
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003524static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3525 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003526{
3527 struct hci_conn_hash *h = &hdev->conn_hash;
3528 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003529 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003530 struct hci_conn *conn;
3531 int cnt, q, conn_num = 0;
3532
3533 BT_DBG("%s", hdev->name);
3534
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003535 rcu_read_lock();
3536
3537 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003538 struct hci_chan *tmp;
3539
3540 if (conn->type != type)
3541 continue;
3542
3543 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3544 continue;
3545
3546 conn_num++;
3547
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003548 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003549 struct sk_buff *skb;
3550
3551 if (skb_queue_empty(&tmp->data_q))
3552 continue;
3553
3554 skb = skb_peek(&tmp->data_q);
3555 if (skb->priority < cur_prio)
3556 continue;
3557
3558 if (skb->priority > cur_prio) {
3559 num = 0;
3560 min = ~0;
3561 cur_prio = skb->priority;
3562 }
3563
3564 num++;
3565
3566 if (conn->sent < min) {
3567 min = conn->sent;
3568 chan = tmp;
3569 }
3570 }
3571
3572 if (hci_conn_num(hdev, type) == conn_num)
3573 break;
3574 }
3575
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003576 rcu_read_unlock();
3577
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003578 if (!chan)
3579 return NULL;
3580
3581 switch (chan->conn->type) {
3582 case ACL_LINK:
3583 cnt = hdev->acl_cnt;
3584 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003585 case AMP_LINK:
3586 cnt = hdev->block_cnt;
3587 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003588 case SCO_LINK:
3589 case ESCO_LINK:
3590 cnt = hdev->sco_cnt;
3591 break;
3592 case LE_LINK:
3593 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3594 break;
3595 default:
3596 cnt = 0;
3597 BT_ERR("Unknown link type");
3598 }
3599
3600 q = cnt / num;
3601 *quote = q ? q : 1;
3602 BT_DBG("chan %p quote %d", chan, *quote);
3603 return chan;
3604}
3605
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003606static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3607{
3608 struct hci_conn_hash *h = &hdev->conn_hash;
3609 struct hci_conn *conn;
3610 int num = 0;
3611
3612 BT_DBG("%s", hdev->name);
3613
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003614 rcu_read_lock();
3615
3616 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003617 struct hci_chan *chan;
3618
3619 if (conn->type != type)
3620 continue;
3621
3622 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3623 continue;
3624
3625 num++;
3626
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003627 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003628 struct sk_buff *skb;
3629
3630 if (chan->sent) {
3631 chan->sent = 0;
3632 continue;
3633 }
3634
3635 if (skb_queue_empty(&chan->data_q))
3636 continue;
3637
3638 skb = skb_peek(&chan->data_q);
3639 if (skb->priority >= HCI_PRIO_MAX - 1)
3640 continue;
3641
3642 skb->priority = HCI_PRIO_MAX - 1;
3643
3644 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003645 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003646 }
3647
3648 if (hci_conn_num(hdev, type) == num)
3649 break;
3650 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003651
3652 rcu_read_unlock();
3653
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003654}
3655
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003656static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3657{
3658 /* Calculate count of blocks used by this packet */
3659 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3660}
3661
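/* Worked example for __get_blocks() (numbers are made up): with
 * hdev->block_len == 64 and a 278 byte skb, the payload after the
 * HCI_ACL_HDR_SIZE (4 byte) header is 274 bytes, so the packet consumes
 * DIV_ROUND_UP(274, 64) == 5 controller blocks.
 */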
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003662static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 if (!test_bit(HCI_RAW, &hdev->flags)) {
3665 /* ACL tx timeout must be longer than maximum
3666 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003667 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003668 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003669 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003671}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003673static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003674{
3675 unsigned int cnt = hdev->acl_cnt;
3676 struct hci_chan *chan;
3677 struct sk_buff *skb;
3678 int quote;
3679
3680 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003681
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003682 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003683 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003684 u32 priority = (skb_peek(&chan->data_q))->priority;
3685 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003686 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003687 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003688
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003689 /* Stop if priority has changed */
3690 if (skb->priority < priority)
3691 break;
3692
3693 skb = skb_dequeue(&chan->data_q);
3694
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003695 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003696 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003697
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003698 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 hdev->acl_last_tx = jiffies;
3700
3701 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003702 chan->sent++;
3703 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704 }
3705 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003706
3707 if (cnt != hdev->acl_cnt)
3708 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709}
3710
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003711static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003712{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003713 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003714 struct hci_chan *chan;
3715 struct sk_buff *skb;
3716 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003717 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003718
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003719 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003720
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003721 BT_DBG("%s", hdev->name);
3722
3723 if (hdev->dev_type == HCI_AMP)
3724 type = AMP_LINK;
3725 else
3726 type = ACL_LINK;
3727
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003728 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003729 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003730 u32 priority = (skb_peek(&chan->data_q))->priority;
3731 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3732 int blocks;
3733
3734 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003735 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003736
3737 /* Stop if priority has changed */
3738 if (skb->priority < priority)
3739 break;
3740
3741 skb = skb_dequeue(&chan->data_q);
3742
3743 blocks = __get_blocks(hdev, skb);
3744 if (blocks > hdev->block_cnt)
3745 return;
3746
3747 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003748 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003749
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003750 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003751 hdev->acl_last_tx = jiffies;
3752
3753 hdev->block_cnt -= blocks;
3754 quote -= blocks;
3755
3756 chan->sent += blocks;
3757 chan->conn->sent += blocks;
3758 }
3759 }
3760
3761 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003762 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003763}
3764
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003765static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003766{
3767 BT_DBG("%s", hdev->name);
3768
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003769 /* No ACL link over BR/EDR controller */
3770 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3771 return;
3772
3773 /* No AMP link over AMP controller */
3774 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003775 return;
3776
3777 switch (hdev->flow_ctl_mode) {
3778 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3779 hci_sched_acl_pkt(hdev);
3780 break;
3781
3782 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3783 hci_sched_acl_blk(hdev);
3784 break;
3785 }
3786}
3787
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003789static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790{
3791 struct hci_conn *conn;
3792 struct sk_buff *skb;
3793 int quote;
3794
3795 BT_DBG("%s", hdev->name);
3796
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003797 if (!hci_conn_num(hdev, SCO_LINK))
3798 return;
3799
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3801 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3802 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003803 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804
3805 conn->sent++;
3806 if (conn->sent == ~0)
3807 conn->sent = 0;
3808 }
3809 }
3810}
3811
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003812static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003813{
3814 struct hci_conn *conn;
3815 struct sk_buff *skb;
3816 int quote;
3817
3818 BT_DBG("%s", hdev->name);
3819
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003820 if (!hci_conn_num(hdev, ESCO_LINK))
3821 return;
3822
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003823 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3824 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003825 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3826 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003827 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003828
3829 conn->sent++;
3830 if (conn->sent == ~0)
3831 conn->sent = 0;
3832 }
3833 }
3834}
3835
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003836static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003837{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003838 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003839 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003840 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003841
3842 BT_DBG("%s", hdev->name);
3843
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003844 if (!hci_conn_num(hdev, LE_LINK))
3845 return;
3846
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003847 if (!test_bit(HCI_RAW, &hdev->flags)) {
3848 /* LE tx timeout must be longer than maximum
3849 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003850 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003851 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003852 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003853 }
3854
3855 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003856 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003857 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003858 u32 priority = (skb_peek(&chan->data_q))->priority;
3859 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003860 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003861 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003862
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003863 /* Stop if priority has changed */
3864 if (skb->priority < priority)
3865 break;
3866
3867 skb = skb_dequeue(&chan->data_q);
3868
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003869 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003870 hdev->le_last_tx = jiffies;
3871
3872 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003873 chan->sent++;
3874 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003875 }
3876 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003877
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003878 if (hdev->le_pkts)
3879 hdev->le_cnt = cnt;
3880 else
3881 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003882
3883 if (cnt != tmp)
3884 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003885}
3886
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003887static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003889 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890 struct sk_buff *skb;
3891
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003892 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003893 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894
Marcel Holtmann52de5992013-09-03 18:08:38 -07003895 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3896 /* Schedule queues and send stuff to HCI driver */
3897 hci_sched_acl(hdev);
3898 hci_sched_sco(hdev);
3899 hci_sched_esco(hdev);
3900 hci_sched_le(hdev);
3901 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003902
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903 /* Send next queued raw (unknown type) packet */
3904 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003905 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906}
3907
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003908/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909
3910/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003911static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912{
3913 struct hci_acl_hdr *hdr = (void *) skb->data;
3914 struct hci_conn *conn;
3915 __u16 handle, flags;
3916
3917 skb_pull(skb, HCI_ACL_HDR_SIZE);
3918
3919 handle = __le16_to_cpu(hdr->handle);
3920 flags = hci_flags(handle);
3921 handle = hci_handle(handle);
3922
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003923 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003924 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925
3926 hdev->stat.acl_rx++;
3927
3928 hci_dev_lock(hdev);
3929 conn = hci_conn_hash_lookup_handle(hdev, handle);
3930 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003931
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003933 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003936 l2cap_recv_acldata(conn, skb, flags);
3937 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003939 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003940 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 }
3942
3943 kfree_skb(skb);
3944}
3945
3946/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003947static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948{
3949 struct hci_sco_hdr *hdr = (void *) skb->data;
3950 struct hci_conn *conn;
3951 __u16 handle;
3952
3953 skb_pull(skb, HCI_SCO_HDR_SIZE);
3954
3955 handle = __le16_to_cpu(hdr->handle);
3956
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003957 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958
3959 hdev->stat.sco_rx++;
3960
3961 hci_dev_lock(hdev);
3962 conn = hci_conn_hash_lookup_handle(hdev, handle);
3963 hci_dev_unlock(hdev);
3964
3965 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003967 sco_recv_scodata(conn, skb);
3968 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003970 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003971 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972 }
3973
3974 kfree_skb(skb);
3975}
3976
Johan Hedberg9238f362013-03-05 20:37:48 +02003977static bool hci_req_is_complete(struct hci_dev *hdev)
3978{
3979 struct sk_buff *skb;
3980
3981 skb = skb_peek(&hdev->cmd_q);
3982 if (!skb)
3983 return true;
3984
3985 return bt_cb(skb)->req.start;
3986}
3987
Johan Hedberg42c6b122013-03-05 20:37:49 +02003988static void hci_resend_last(struct hci_dev *hdev)
3989{
3990 struct hci_command_hdr *sent;
3991 struct sk_buff *skb;
3992 u16 opcode;
3993
3994 if (!hdev->sent_cmd)
3995 return;
3996
3997 sent = (void *) hdev->sent_cmd->data;
3998 opcode = __le16_to_cpu(sent->opcode);
3999 if (opcode == HCI_OP_RESET)
4000 return;
4001
4002 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4003 if (!skb)
4004 return;
4005
4006 skb_queue_head(&hdev->cmd_q, skb);
4007 queue_work(hdev->workqueue, &hdev->cmd_work);
4008}
4009
Johan Hedberg9238f362013-03-05 20:37:48 +02004010void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4011{
4012 hci_req_complete_t req_complete = NULL;
4013 struct sk_buff *skb;
4014 unsigned long flags;
4015
4016 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4017
Johan Hedberg42c6b122013-03-05 20:37:49 +02004018 /* If the completed command doesn't match the last one that was
4019 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004020 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004021 if (!hci_sent_cmd_data(hdev, opcode)) {
4022 /* Some CSR based controllers generate a spontaneous
4023 * reset complete event during init and any pending
4024 * command will never be completed. In such a case we
4025 * need to resend whatever was the last sent
4026 * command.
4027 */
4028 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4029 hci_resend_last(hdev);
4030
Johan Hedberg9238f362013-03-05 20:37:48 +02004031 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004032 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004033
4034 /* If the command succeeded and there's still more commands in
4035 * this request the request is not yet complete.
4036 */
4037 if (!status && !hci_req_is_complete(hdev))
4038 return;
4039
4040 /* If this was the last command in a request the complete
4041 * callback would be found in hdev->sent_cmd instead of the
4042 * command queue (hdev->cmd_q).
4043 */
4044 if (hdev->sent_cmd) {
4045 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004046
4047 if (req_complete) {
4048 /* We must set the complete callback to NULL to
4049 * avoid calling the callback more than once if
4050 * this function gets called again.
4051 */
4052 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4053
Johan Hedberg9238f362013-03-05 20:37:48 +02004054 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004055 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004056 }
4057
4058 /* Remove all pending commands belonging to this request */
4059 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4060 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4061 if (bt_cb(skb)->req.start) {
4062 __skb_queue_head(&hdev->cmd_q, skb);
4063 break;
4064 }
4065
4066 req_complete = bt_cb(skb)->req.complete;
4067 kfree_skb(skb);
4068 }
4069 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4070
4071call_complete:
4072 if (req_complete)
4073 req_complete(hdev, status);
4074}
4075
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004076static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004078 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 struct sk_buff *skb;
4080
4081 BT_DBG("%s", hdev->name);
4082
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004084 /* Send copy to monitor */
4085 hci_send_to_monitor(hdev, skb);
4086
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 if (atomic_read(&hdev->promisc)) {
4088 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004089 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 }
4091
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004092 if (test_bit(HCI_RAW, &hdev->flags) ||
4093 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094 kfree_skb(skb);
4095 continue;
4096 }
4097
4098 if (test_bit(HCI_INIT, &hdev->flags)) {
4099			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004100 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 case HCI_ACLDATA_PKT:
4102 case HCI_SCODATA_PKT:
4103 kfree_skb(skb);
4104 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106 }
4107
4108 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004109 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004111 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112 hci_event_packet(hdev, skb);
4113 break;
4114
4115 case HCI_ACLDATA_PKT:
4116 BT_DBG("%s ACL data packet", hdev->name);
4117 hci_acldata_packet(hdev, skb);
4118 break;
4119
4120 case HCI_SCODATA_PKT:
4121 BT_DBG("%s SCO data packet", hdev->name);
4122 hci_scodata_packet(hdev, skb);
4123 break;
4124
4125 default:
4126 kfree_skb(skb);
4127 break;
4128 }
4129 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130}
4131
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004132static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004134 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135 struct sk_buff *skb;
4136
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004137 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4138 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004141 if (atomic_read(&hdev->cmd_cnt)) {
4142 skb = skb_dequeue(&hdev->cmd_q);
4143 if (!skb)
4144 return;
4145
Wei Yongjun7585b972009-02-25 18:29:52 +08004146 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004148 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004149 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004151 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004152 if (test_bit(HCI_RESET, &hdev->flags))
4153 del_timer(&hdev->cmd_timer);
4154 else
4155 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004156 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 } else {
4158 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004159 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160 }
4161 }
4162}