/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

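/* The helpers below back the read-only and read-write entries that
 * __hci_init() creates under each controller's debugfs directory
 * (all the debugfs_create_*() calls there pass hdev->debugfs as the
 * parent).
 */
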
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid);
		data4 = get_unaligned_le16(uuid + 4);
		data3 = get_unaligned_le16(uuid + 6);
		data2 = get_unaligned_le16(uuid + 8);
		data1 = get_unaligned_le16(uuid + 10);
		data0 = get_unaligned_le32(uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

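/* DEFINE_SIMPLE_ATTRIBUTE() generates the file operations around a
 * get/set callback pair, formatting the value with the given
 * printf-style format string. Passing NULL for the set callback, as
 * for voice_setting below, makes the attribute effectively read-only.
 */
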
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

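/* The idle timeout is handled in milliseconds; the setter rejects
 * anything outside 500 msec to 3600000 msec (one hour), while 0 is
 * still accepted (presumably meaning no idle timeout at all).
 */
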
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

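/* The sniff interval setters only accept even, non-zero values and keep
 * the invariant sniff_min_interval <= sniff_max_interval. These values
 * feed the Sniff Mode parameters, which the HCI specification expresses
 * in baseband slots of 0.625 ms each.
 */
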
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

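/* Synchronous requests are tracked through hdev->req_status and
 * hdev->req_result: the submitter parks itself on hdev->req_wait_q,
 * and the completion (or cancellation) handlers below flip req_status
 * and wake it up again.
 */
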
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

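/* Consume the last received event (hdev->recv_evt) and check that it
 * matches what the caller waited for: either a specific event code or,
 * when no event is given, a Command Complete for the issued opcode.
 * Returns the skb on a match and ERR_PTR(-ENODATA) otherwise.
 */
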
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

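/* Send a single HCI command and sleep until the matching completion
 * event arrives or the timeout expires. On success the Command
 * Complete (or requested event) skb is returned; errors come back as
 * ERR_PTR() values, so callers must check with IS_ERR().
 */
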
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

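/* The request builders below do not talk to the controller directly;
 * they only queue HCI commands on the passed-in hci_request via
 * hci_req_add(). Nothing hits the wire until the whole request is
 * submitted with hci_req_run() by the hci_req_sync() machinery above.
 */
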
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

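/* Controller bring-up runs the hci_initX_req() stages above in order:
 * stage 1 resets the device and reads the basic identity, while the
 * later stages only run for BR/EDR capable controllers and finish with
 * the debugfs entries, which are created once during HCI_SETUP.
 */
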
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
	}

	return 0;
}

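/* The next four builders feed hci_req_sync() for the HCISETSCAN,
 * HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL device ioctls; each one
 * simply writes the option value into the corresponding HCI command.
 */
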
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

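/* Re-insert an entry so the resolve list stays ordered by signal
 * strength: entries with a stronger RSSI (smaller absolute value) come
 * first and therefore get their names resolved first, while entries
 * already in NAME_PENDING state keep their position.
 */
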
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

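/* Add or refresh a cache entry for an inquiry result. The return value
 * tells the caller whether the remote name is already known; a false
 * return means the entry sits on the unknown list and still needs a
 * remote name request.
 */
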
Johan Hedberg31754052012-01-04 13:39:52 +02001349bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001350 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351{
Johan Hedberg30883512012-01-04 14:16:21 +02001352 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001353 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001355 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356
Szymon Janc2b2fec42012-11-20 11:38:54 +01001357 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1358
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001359 if (ssp)
1360 *ssp = data->ssp_mode;
1361
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001362 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001363 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001364 if (ie->data.ssp_mode && ssp)
1365 *ssp = true;
1366
Johan Hedberga3d4e202012-01-09 00:53:02 +02001367 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001368 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001369 ie->data.rssi = data->rssi;
1370 hci_inquiry_cache_update_resolve(hdev, ie);
1371 }
1372
Johan Hedberg561aafb2012-01-04 13:31:59 +02001373 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001374 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001375
Johan Hedberg561aafb2012-01-04 13:31:59 +02001376 /* Entry not in the cache. Add new one. */
1377 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1378 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001379 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001380
1381 list_add(&ie->all, &cache->all);
1382
1383 if (name_known) {
1384 ie->name_state = NAME_KNOWN;
1385 } else {
1386 ie->name_state = NAME_NOT_KNOWN;
1387 list_add(&ie->list, &cache->unknown);
1388 }
1389
1390update:
1391 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001392 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001393 ie->name_state = NAME_KNOWN;
1394 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 }
1396
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001397 memcpy(&ie->data, data, sizeof(*data));
1398 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001400
1401 if (ie->name_state == NAME_NOT_KNOWN)
1402 return false;
1403
1404 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405}
1406
1407static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1408{
Johan Hedberg30883512012-01-04 14:16:21 +02001409 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 struct inquiry_info *info = (struct inquiry_info *) buf;
1411 struct inquiry_entry *e;
1412 int copied = 0;
1413
Johan Hedberg561aafb2012-01-04 13:31:59 +02001414 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001416
1417 if (copied >= num)
1418 break;
1419
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 bacpy(&info->bdaddr, &data->bdaddr);
1421 info->pscan_rep_mode = data->pscan_rep_mode;
1422 info->pscan_period_mode = data->pscan_period_mode;
1423 info->pscan_mode = data->pscan_mode;
1424 memcpy(info->dev_class, data->dev_class, 3);
1425 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001426
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001428 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 }
1430
1431 BT_DBG("cache %p, copied %d", cache, copied);
1432 return copied;
1433}
1434
Johan Hedberg42c6b122013-03-05 20:37:49 +02001435static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436{
1437 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001438 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 struct hci_cp_inquiry cp;
1440
1441 BT_DBG("%s", hdev->name);
1442
1443 if (test_bit(HCI_INQUIRY, &hdev->flags))
1444 return;
1445
1446 /* Start Inquiry */
1447 memcpy(&cp.lap, &ir->lap, 3);
1448 cp.length = ir->length;
1449 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001450 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451}
1452
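/* Action function for the wait_on_bit() call in hci_inquiry() below: it is
 * invoked every time the waiter is woken; returning non-zero (a pending
 * signal here) aborts the wait, while returning zero goes back to sleep
 * until the HCI_INQUIRY flag is cleared.
 */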
Andre Guedes3e13fa12013-03-27 20:04:56 -03001453static int wait_inquiry(void *word)
1454{
1455 schedule();
1456 return signal_pending(current);
1457}
1458
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459int hci_inquiry(void __user *arg)
1460{
1461 __u8 __user *ptr = arg;
1462 struct hci_inquiry_req ir;
1463 struct hci_dev *hdev;
1464 int err = 0, do_inquiry = 0, max_rsp;
1465 long timeo;
1466 __u8 *buf;
1467
1468 if (copy_from_user(&ir, ptr, sizeof(ir)))
1469 return -EFAULT;
1470
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001471 hdev = hci_dev_get(ir.dev_id);
1472 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 return -ENODEV;
1474
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001475 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1476 err = -EBUSY;
1477 goto done;
1478 }
1479
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001480 if (hdev->dev_type != HCI_BREDR) {
1481 err = -EOPNOTSUPP;
1482 goto done;
1483 }
1484
Johan Hedberg56f87902013-10-02 13:43:13 +03001485 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1486 err = -EOPNOTSUPP;
1487 goto done;
1488 }
1489
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001490 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001491 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001492 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001493 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 do_inquiry = 1;
1495 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001496 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
Marcel Holtmann04837f62006-07-03 10:02:33 +02001498 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001499
1500 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001501 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1502 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001503 if (err < 0)
1504 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001505
1506 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1507 * cleared). If it is interrupted by a signal, return -EINTR.
1508 */
1509 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1510 TASK_INTERRUPTIBLE))
1511 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001512 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001514	/* For an unlimited number of responses we use a buffer with
1515	 * 255 entries
1516	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1518
1519	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1520	 * and then copy it to user space.
1521	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001522 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001523 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 err = -ENOMEM;
1525 goto done;
1526 }
1527
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001528 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001530 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
1532 BT_DBG("num_rsp %d", ir.num_rsp);
1533
1534 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1535 ptr += sizeof(ir);
1536 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001537 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001539 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 err = -EFAULT;
1541
1542 kfree(buf);
1543
1544done:
1545 hci_dev_put(hdev);
1546 return err;
1547}
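/* Userspace view (illustrative sketch, error handling elided): this
 * function backs the HCIINQUIRY ioctl on an HCI socket. The caller passes
 * one buffer holding the request header followed by room for the returned
 * inquiry_info records. Constants come from the userspace <bluetooth/hci.h>;
 * hci_sock_fd is an assumed open HCI socket.
 */
#if 0
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} req = {
		.ir = {
			.dev_id  = 0,				/* hci0 */
			.flags   = IREQ_CACHE_FLUSH,
			.lap     = { 0x33, 0x8b, 0x9e },	/* GIAC */
			.length  = 8,				/* 8 * 1.28s */
			.num_rsp = 255,
		},
	};

	ioctl(hci_sock_fd, HCIINQUIRY, &req);
#endif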
1548
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001549static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 int ret = 0;
1552
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 BT_DBG("%s %p", hdev->name, hdev);
1554
1555 hci_req_lock(hdev);
1556
Johan Hovold94324962012-03-15 14:48:41 +01001557 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1558 ret = -ENODEV;
1559 goto done;
1560 }
1561
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001562 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1563 /* Check for rfkill but allow the HCI setup stage to
1564 * proceed (which in itself doesn't cause any RF activity).
1565 */
1566 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1567 ret = -ERFKILL;
1568 goto done;
1569 }
1570
1571 /* Check for valid public address or a configured static
1572	 * random address, but let the HCI setup proceed to
1573 * be able to determine if there is a public address
1574 * or not.
1575 *
1576 * This check is only valid for BR/EDR controllers
1577 * since AMP controllers do not have an address.
1578 */
1579 if (hdev->dev_type == HCI_BREDR &&
1580 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1581 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1582 ret = -EADDRNOTAVAIL;
1583 goto done;
1584 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001585 }
1586
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 if (test_bit(HCI_UP, &hdev->flags)) {
1588 ret = -EALREADY;
1589 goto done;
1590 }
1591
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 if (hdev->open(hdev)) {
1593 ret = -EIO;
1594 goto done;
1595 }
1596
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001597 atomic_set(&hdev->cmd_cnt, 1);
1598 set_bit(HCI_INIT, &hdev->flags);
1599
1600 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1601 ret = hdev->setup(hdev);
1602
1603 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001604 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1605 set_bit(HCI_RAW, &hdev->flags);
1606
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001607 if (!test_bit(HCI_RAW, &hdev->flags) &&
1608 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001609 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 }
1611
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001612 clear_bit(HCI_INIT, &hdev->flags);
1613
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 if (!ret) {
1615 hci_dev_hold(hdev);
1616 set_bit(HCI_UP, &hdev->flags);
1617 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001618 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001619 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001620 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001621 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001622 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001623 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001624 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001625 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001627 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001628 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001629 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
1631 skb_queue_purge(&hdev->cmd_q);
1632 skb_queue_purge(&hdev->rx_q);
1633
1634 if (hdev->flush)
1635 hdev->flush(hdev);
1636
1637 if (hdev->sent_cmd) {
1638 kfree_skb(hdev->sent_cmd);
1639 hdev->sent_cmd = NULL;
1640 }
1641
1642 hdev->close(hdev);
1643 hdev->flags = 0;
1644 }
1645
1646done:
1647 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 return ret;
1649}
1650
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001651/* ---- HCI ioctl helpers ---- */
1652
1653int hci_dev_open(__u16 dev)
1654{
1655 struct hci_dev *hdev;
1656 int err;
1657
1658 hdev = hci_dev_get(dev);
1659 if (!hdev)
1660 return -ENODEV;
1661
Johan Hedberge1d08f42013-10-01 22:44:50 +03001662 /* We need to ensure that no other power on/off work is pending
1663 * before proceeding to call hci_dev_do_open. This is
1664 * particularly important if the setup procedure has not yet
1665 * completed.
1666 */
1667 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1668 cancel_delayed_work(&hdev->power_off);
1669
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001670 /* After this call it is guaranteed that the setup procedure
1671 * has finished. This means that error conditions like RFKILL
1672 * or no valid public or static random address apply.
1673 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001674 flush_workqueue(hdev->req_workqueue);
1675
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001676 err = hci_dev_do_open(hdev);
1677
1678 hci_dev_put(hdev);
1679
1680 return err;
1681}
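/* Userspace view (illustrative sketch): hci_dev_open() backs the HCIDEVUP
 * ioctl and hci_dev_close() below backs HCIDEVDOWN; hci_sock_fd is an
 * assumed open HCI control socket.
 */
#if 0
	ioctl(hci_sock_fd, HCIDEVUP, 0);	/* power on hci0 */
	ioctl(hci_sock_fd, HCIDEVDOWN, 0);	/* power off hci0 */
#endif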
1682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683static int hci_dev_do_close(struct hci_dev *hdev)
1684{
1685 BT_DBG("%s %p", hdev->name, hdev);
1686
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001687 cancel_delayed_work(&hdev->power_off);
1688
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 hci_req_cancel(hdev, ENODEV);
1690 hci_req_lock(hdev);
1691
1692 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001693 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 hci_req_unlock(hdev);
1695 return 0;
1696 }
1697
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001698 /* Flush RX and TX works */
1699 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001700 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001702 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001703 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001704 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001705 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001706 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001707 }
1708
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001709 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001710 cancel_delayed_work(&hdev->service_cache);
1711
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001712 cancel_delayed_work_sync(&hdev->le_scan_disable);
1713
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001714 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001715 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001717 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 hci_notify(hdev, HCI_DEV_DOWN);
1720
1721 if (hdev->flush)
1722 hdev->flush(hdev);
1723
1724 /* Reset device */
1725 skb_queue_purge(&hdev->cmd_q);
1726 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001727 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001728 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001729 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001731 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 clear_bit(HCI_INIT, &hdev->flags);
1733 }
1734
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001735 /* flush cmd work */
1736 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737
1738 /* Drop queues */
1739 skb_queue_purge(&hdev->rx_q);
1740 skb_queue_purge(&hdev->cmd_q);
1741 skb_queue_purge(&hdev->raw_q);
1742
1743 /* Drop last sent command */
1744 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001745 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 kfree_skb(hdev->sent_cmd);
1747 hdev->sent_cmd = NULL;
1748 }
1749
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001750 kfree_skb(hdev->recv_evt);
1751 hdev->recv_evt = NULL;
1752
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 /* After this point our queues are empty
1754 * and no tasks are scheduled. */
1755 hdev->close(hdev);
1756
Johan Hedberg35b973c2013-03-15 17:06:59 -05001757 /* Clear flags */
1758 hdev->flags = 0;
1759 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1760
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001761 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1762 if (hdev->dev_type == HCI_BREDR) {
1763 hci_dev_lock(hdev);
1764 mgmt_powered(hdev, 0);
1765 hci_dev_unlock(hdev);
1766 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001767 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001768
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001769 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001770 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001771
Johan Hedberge59fda82012-02-22 18:11:53 +02001772 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001773 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001774
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 hci_req_unlock(hdev);
1776
1777 hci_dev_put(hdev);
1778 return 0;
1779}
1780
1781int hci_dev_close(__u16 dev)
1782{
1783 struct hci_dev *hdev;
1784 int err;
1785
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001786 hdev = hci_dev_get(dev);
1787 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001789
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001790 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1791 err = -EBUSY;
1792 goto done;
1793 }
1794
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001795 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1796 cancel_delayed_work(&hdev->power_off);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001799
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001800done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 hci_dev_put(hdev);
1802 return err;
1803}
1804
1805int hci_dev_reset(__u16 dev)
1806{
1807 struct hci_dev *hdev;
1808 int ret = 0;
1809
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001810 hdev = hci_dev_get(dev);
1811 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 return -ENODEV;
1813
1814 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
Marcel Holtmann808a0492013-08-26 20:57:58 -07001816 if (!test_bit(HCI_UP, &hdev->flags)) {
1817 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001819 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001821 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1822 ret = -EBUSY;
1823 goto done;
1824 }
1825
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 /* Drop queues */
1827 skb_queue_purge(&hdev->rx_q);
1828 skb_queue_purge(&hdev->cmd_q);
1829
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001830 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001831 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001833 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834
1835 if (hdev->flush)
1836 hdev->flush(hdev);
1837
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001838 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001839 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840
1841 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001842 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843
1844done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 hci_req_unlock(hdev);
1846 hci_dev_put(hdev);
1847 return ret;
1848}
1849
1850int hci_dev_reset_stat(__u16 dev)
1851{
1852 struct hci_dev *hdev;
1853 int ret = 0;
1854
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001855 hdev = hci_dev_get(dev);
1856 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 return -ENODEV;
1858
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001859 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1860 ret = -EBUSY;
1861 goto done;
1862 }
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1865
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001866done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 return ret;
1869}
1870
1871int hci_dev_cmd(unsigned int cmd, void __user *arg)
1872{
1873 struct hci_dev *hdev;
1874 struct hci_dev_req dr;
1875 int err = 0;
1876
1877 if (copy_from_user(&dr, arg, sizeof(dr)))
1878 return -EFAULT;
1879
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001880 hdev = hci_dev_get(dr.dev_id);
1881 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 return -ENODEV;
1883
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001884 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1885 err = -EBUSY;
1886 goto done;
1887 }
1888
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001889 if (hdev->dev_type != HCI_BREDR) {
1890 err = -EOPNOTSUPP;
1891 goto done;
1892 }
1893
Johan Hedberg56f87902013-10-02 13:43:13 +03001894 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1895 err = -EOPNOTSUPP;
1896 goto done;
1897 }
1898
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 switch (cmd) {
1900 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001901 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1902 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 break;
1904
1905 case HCISETENCRYPT:
1906 if (!lmp_encrypt_capable(hdev)) {
1907 err = -EOPNOTSUPP;
1908 break;
1909 }
1910
1911 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1912 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001913 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1914 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 if (err)
1916 break;
1917 }
1918
Johan Hedberg01178cd2013-03-05 20:37:41 +02001919 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1920 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 break;
1922
1923 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001924 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1925 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 break;
1927
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001928 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001929 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1930 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001931 break;
1932
1933 case HCISETLINKMODE:
1934 hdev->link_mode = ((__u16) dr.dev_opt) &
1935 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1936 break;
1937
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 case HCISETPTYPE:
1939 hdev->pkt_type = (__u16) dr.dev_opt;
1940 break;
1941
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001943 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1944 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 break;
1946
1947 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001948 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1949 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 break;
1951
1952 default:
1953 err = -EINVAL;
1954 break;
1955 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001956
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001957done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 hci_dev_put(hdev);
1959 return err;
1960}
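/* Userspace view (illustrative sketch): hci_dev_cmd() backs the HCISET*
 * ioctls handled above. Enabling page and inquiry scan makes the adapter
 * connectable and discoverable; SCAN_PAGE and SCAN_INQUIRY come from the
 * userspace <bluetooth/hci.h>, and hci_sock_fd is an assumed HCI socket.
 */
#if 0
	struct hci_dev_req dr = {
		.dev_id  = 0,				/* hci0 */
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
	};

	ioctl(hci_sock_fd, HCISETSCAN, &dr);
#endif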
1961
1962int hci_get_dev_list(void __user *arg)
1963{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001964 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 struct hci_dev_list_req *dl;
1966 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 int n = 0, size, err;
1968 __u16 dev_num;
1969
1970 if (get_user(dev_num, (__u16 __user *) arg))
1971 return -EFAULT;
1972
1973 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1974 return -EINVAL;
1975
1976 size = sizeof(*dl) + dev_num * sizeof(*dr);
1977
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001978 dl = kzalloc(size, GFP_KERNEL);
1979 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 return -ENOMEM;
1981
1982 dr = dl->dev_req;
1983
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001984 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001985 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001986 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001987 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001988
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001989 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1990 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001991
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 (dr + n)->dev_id = hdev->id;
1993 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001994
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 if (++n >= dev_num)
1996 break;
1997 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001998 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
2000 dl->dev_num = n;
2001 size = sizeof(*dl) + n * sizeof(*dr);
2002
2003 err = copy_to_user(arg, dl, size);
2004 kfree(dl);
2005
2006 return err ? -EFAULT : 0;
2007}
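/* Userspace view (illustrative sketch): HCIGETDEVLIST fills in up to
 * dev_num (dev_id, flags) pairs behind the header. Bounding the request
 * with HCI_MAX_DEV is purely for illustration; callers may size the
 * buffer however they like, within the limit checked above.
 */
#if 0
	struct {
		struct hci_dev_list_req dl;
		struct hci_dev_req dr[HCI_MAX_DEV];
	} req = { .dl = { .dev_num = HCI_MAX_DEV } };

	ioctl(hci_sock_fd, HCIGETDEVLIST, &req);
#endif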
2008
2009int hci_get_dev_info(void __user *arg)
2010{
2011 struct hci_dev *hdev;
2012 struct hci_dev_info di;
2013 int err = 0;
2014
2015 if (copy_from_user(&di, arg, sizeof(di)))
2016 return -EFAULT;
2017
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002018 hdev = hci_dev_get(di.dev_id);
2019 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 return -ENODEV;
2021
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002022 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002023 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002024
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002025 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2026 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 strcpy(di.name, hdev->name);
2029 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002030 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 di.flags = hdev->flags;
2032 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002033 if (lmp_bredr_capable(hdev)) {
2034 di.acl_mtu = hdev->acl_mtu;
2035 di.acl_pkts = hdev->acl_pkts;
2036 di.sco_mtu = hdev->sco_mtu;
2037 di.sco_pkts = hdev->sco_pkts;
2038 } else {
2039 di.acl_mtu = hdev->le_mtu;
2040 di.acl_pkts = hdev->le_pkts;
2041 di.sco_mtu = 0;
2042 di.sco_pkts = 0;
2043 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 di.link_policy = hdev->link_policy;
2045 di.link_mode = hdev->link_mode;
2046
2047 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2048 memcpy(&di.features, &hdev->features, sizeof(di.features));
2049
2050 if (copy_to_user(arg, &di, sizeof(di)))
2051 err = -EFAULT;
2052
2053 hci_dev_put(hdev);
2054
2055 return err;
2056}
2057
2058/* ---- Interface to HCI drivers ---- */
2059
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002060static int hci_rfkill_set_block(void *data, bool blocked)
2061{
2062 struct hci_dev *hdev = data;
2063
2064 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2065
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002066 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2067 return -EBUSY;
2068
Johan Hedberg5e130362013-09-13 08:58:17 +03002069 if (blocked) {
2070 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002071 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2072 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002073 } else {
2074 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002075 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002076
2077 return 0;
2078}
2079
2080static const struct rfkill_ops hci_rfkill_ops = {
2081 .set_block = hci_rfkill_set_block,
2082};
2083
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002084static void hci_power_on(struct work_struct *work)
2085{
2086 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002087 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002088
2089 BT_DBG("%s", hdev->name);
2090
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002091 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002092 if (err < 0) {
2093 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002094 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002095 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002096
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002097 /* During the HCI setup phase, a few error conditions are
2098 * ignored and they need to be checked now. If they are still
2099 * valid, it is important to turn the device back off.
2100 */
2101 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2102 (hdev->dev_type == HCI_BREDR &&
2103 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2104 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002105 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2106 hci_dev_do_close(hdev);
2107 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002108 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2109 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002110 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002111
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002112 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002113 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002114}
2115
2116static void hci_power_off(struct work_struct *work)
2117{
Johan Hedberg32435532011-11-07 22:16:04 +02002118 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002119 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002120
2121 BT_DBG("%s", hdev->name);
2122
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002123 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002124}
2125
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002126static void hci_discov_off(struct work_struct *work)
2127{
2128 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002129
2130 hdev = container_of(work, struct hci_dev, discov_off.work);
2131
2132 BT_DBG("%s", hdev->name);
2133
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002134 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002135}
2136
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002137int hci_uuids_clear(struct hci_dev *hdev)
2138{
Johan Hedberg48210022013-01-27 00:31:28 +02002139 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002140
Johan Hedberg48210022013-01-27 00:31:28 +02002141 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2142 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002143 kfree(uuid);
2144 }
2145
2146 return 0;
2147}
2148
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002149int hci_link_keys_clear(struct hci_dev *hdev)
2150{
2151 struct list_head *p, *n;
2152
2153 list_for_each_safe(p, n, &hdev->link_keys) {
2154 struct link_key *key;
2155
2156 key = list_entry(p, struct link_key, list);
2157
2158 list_del(p);
2159 kfree(key);
2160 }
2161
2162 return 0;
2163}
2164
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002165int hci_smp_ltks_clear(struct hci_dev *hdev)
2166{
2167 struct smp_ltk *k, *tmp;
2168
2169 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2170 list_del(&k->list);
2171 kfree(k);
2172 }
2173
2174 return 0;
2175}
2176
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002177struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2178{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002179 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002180
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002181 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002182 if (bacmp(bdaddr, &k->bdaddr) == 0)
2183 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002184
2185 return NULL;
2186}
2187
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302188static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002189 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002190{
2191 /* Legacy key */
2192 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302193 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002194
2195 /* Debug keys are insecure so don't store them persistently */
2196 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302197 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002198
2199 /* Changed combination key and there's no previous one */
2200 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302201 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002202
2203 /* Security mode 3 case */
2204 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302205 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002206
2207 /* Neither local nor remote side had no-bonding as requirement */
2208 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302209 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002210
2211 /* Local side had dedicated bonding as requirement */
2212 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302213 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002214
2215 /* Remote side had dedicated bonding as requirement */
2216 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302217 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002218
2219 /* If none of the above criteria match, then don't store the key
2220 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302221 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002222}
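/* Decision summary for hci_persistent_key() above (illustrative):
 *
 *   legacy key (type < 0x03)                  -> store
 *   HCI_LK_DEBUG_COMBINATION                  -> don't store
 *   changed combination with no previous key  -> don't store
 *   no connection (security mode 3)           -> store
 *   both sides required some form of bonding  -> store
 *   either side required dedicated bonding    -> store
 *   anything else                             -> don't store
 */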
2223
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002224struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002225{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002226 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002227
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002228 list_for_each_entry(k, &hdev->long_term_keys, list) {
2229 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002230 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002231 continue;
2232
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002233 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002234 }
2235
2236 return NULL;
2237}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002238
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002239struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002240 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002241{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002242 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002243
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002244 list_for_each_entry(k, &hdev->long_term_keys, list)
2245 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002246 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002247 return k;
2248
2249 return NULL;
2250}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002251
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002252int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002253 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002254{
2255 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302256 u8 old_key_type;
2257 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002258
2259 old_key = hci_find_link_key(hdev, bdaddr);
2260 if (old_key) {
2261 old_key_type = old_key->type;
2262 key = old_key;
2263 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002264 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002265 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2266 if (!key)
2267 return -ENOMEM;
2268 list_add(&key->list, &hdev->link_keys);
2269 }
2270
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002271 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002272
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002273 /* Some buggy controller combinations generate a changed
2274 * combination key for legacy pairing even when there's no
2275 * previous key */
2276 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002277 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002278 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002279 if (conn)
2280 conn->key_type = type;
2281 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002282
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002283 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002284 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002285 key->pin_len = pin_len;
2286
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002287 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002288 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002289 else
2290 key->type = type;
2291
Johan Hedberg4df378a2011-04-28 11:29:03 -07002292 if (!new_key)
2293 return 0;
2294
2295 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2296
Johan Hedberg744cf192011-11-08 20:40:14 +02002297 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002298
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302299 if (conn)
2300 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002301
2302 return 0;
2303}
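/* Usage sketch (illustrative): the Link Key Notification event handler in
 * hci_event.c is the natural caller; a minimal hypothetical handler would
 * look roughly like this.
 */
#if 0
static void example_link_key_notify(struct hci_dev *hdev,
				    struct hci_ev_link_key_notify *ev,
				    struct hci_conn *conn)
{
	hci_dev_lock(hdev);
	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
			 ev->key_type, conn ? conn->pin_length : 0);
	hci_dev_unlock(hdev);
}
#endif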
2304
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002305int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002306 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002307 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002308{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002309 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002310
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002311 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2312 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002313
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002314 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2315 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002316 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002317 else {
2318 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002319 if (!key)
2320 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002321 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002322 }
2323
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002324 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002325 key->bdaddr_type = addr_type;
2326 memcpy(key->val, tk, sizeof(key->val));
2327 key->authenticated = authenticated;
2328 key->ediv = ediv;
2329 key->enc_size = enc_size;
2330 key->type = type;
2331 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002332
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002333 if (!new_key)
2334 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002335
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002336 if (type & HCI_SMP_LTK)
2337 mgmt_new_ltk(hdev, key, 1);
2338
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002339 return 0;
2340}
2341
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002342int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2343{
2344 struct link_key *key;
2345
2346 key = hci_find_link_key(hdev, bdaddr);
2347 if (!key)
2348 return -ENOENT;
2349
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002350 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002351
2352 list_del(&key->list);
2353 kfree(key);
2354
2355 return 0;
2356}
2357
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002358int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2359{
2360 struct smp_ltk *k, *tmp;
2361
2362 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2363 if (bacmp(bdaddr, &k->bdaddr))
2364 continue;
2365
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002366 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002367
2368 list_del(&k->list);
2369 kfree(k);
2370 }
2371
2372 return 0;
2373}
2374
Ville Tervo6bd32322011-02-16 16:32:41 +02002375/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002376static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002377{
2378 struct hci_dev *hdev = (void *) arg;
2379
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002380 if (hdev->sent_cmd) {
2381 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2382 u16 opcode = __le16_to_cpu(sent->opcode);
2383
2384 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2385 } else {
2386 BT_ERR("%s command tx timeout", hdev->name);
2387 }
2388
Ville Tervo6bd32322011-02-16 16:32:41 +02002389 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002390 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002391}
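/* Illustrative note: hci_cmd_work() arms this timer whenever a command is
 * handed to the driver, and the event path deletes it once the matching
 * Command Complete/Status arrives, so firing here means the controller
 * went silent on an outstanding command.
 */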
2392
Szymon Janc2763eda2011-03-22 13:12:22 +01002393struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002394 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002395{
2396 struct oob_data *data;
2397
2398 list_for_each_entry(data, &hdev->remote_oob_data, list)
2399 if (bacmp(bdaddr, &data->bdaddr) == 0)
2400 return data;
2401
2402 return NULL;
2403}
2404
2405int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2406{
2407 struct oob_data *data;
2408
2409 data = hci_find_remote_oob_data(hdev, bdaddr);
2410 if (!data)
2411 return -ENOENT;
2412
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002413 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002414
2415 list_del(&data->list);
2416 kfree(data);
2417
2418 return 0;
2419}
2420
2421int hci_remote_oob_data_clear(struct hci_dev *hdev)
2422{
2423 struct oob_data *data, *n;
2424
2425 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2426 list_del(&data->list);
2427 kfree(data);
2428 }
2429
2430 return 0;
2431}
2432
2433int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002434 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002435{
2436 struct oob_data *data;
2437
2438 data = hci_find_remote_oob_data(hdev, bdaddr);
2439
2440 if (!data) {
2441 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2442 if (!data)
2443 return -ENOMEM;
2444
2445 bacpy(&data->bdaddr, bdaddr);
2446 list_add(&data->list, &hdev->remote_oob_data);
2447 }
2448
2449 memcpy(data->hash, hash, sizeof(data->hash));
2450 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2451
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002452 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002453
2454 return 0;
2455}
2456
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002457struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2458 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002459{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002460 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002461
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002462 list_for_each_entry(b, &hdev->blacklist, list) {
2463 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002464 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002465 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002466
2467 return NULL;
2468}
2469
2470int hci_blacklist_clear(struct hci_dev *hdev)
2471{
2472 struct list_head *p, *n;
2473
2474 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002475 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002476
2477 list_del(p);
2478 kfree(b);
2479 }
2480
2481 return 0;
2482}
2483
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002484int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002485{
2486 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002487
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002488 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002489 return -EBADF;
2490
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002491 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002492 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002493
2494 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002495 if (!entry)
2496 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002497
2498 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002499 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002500
2501 list_add(&entry->list, &hdev->blacklist);
2502
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002503 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002504}
2505
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002506int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002507{
2508 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002509
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002510 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002511 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002512
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002513 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002514 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002515 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002516
2517 list_del(&entry->list);
2518 kfree(entry);
2519
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002520 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002521}
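/* Usage sketch (illustrative): the mgmt Block Device / Unblock Device
 * commands are the primary callers; bdaddr and its type come from the
 * management request, and the hdev lock must be held around the call.
 */
#if 0
	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);
#endif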
2522
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002523static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002524{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002525 if (status) {
2526 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002527
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002528 hci_dev_lock(hdev);
2529 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2530 hci_dev_unlock(hdev);
2531 return;
2532 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002533}
2534
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002535static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002536{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002537 /* General inquiry access code (GIAC) */
2538 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2539 struct hci_request req;
2540 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002541 int err;
2542
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002543 if (status) {
2544 BT_ERR("Failed to disable LE scanning: status %d", status);
2545 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002546 }
2547
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002548 switch (hdev->discovery.type) {
2549 case DISCOV_TYPE_LE:
2550 hci_dev_lock(hdev);
2551 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2552 hci_dev_unlock(hdev);
2553 break;
2554
2555 case DISCOV_TYPE_INTERLEAVED:
2556 hci_req_init(&req, hdev);
2557
2558 memset(&cp, 0, sizeof(cp));
2559 memcpy(&cp.lap, lap, sizeof(cp.lap));
2560 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2561 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2562
2563 hci_dev_lock(hdev);
2564
2565 hci_inquiry_cache_flush(hdev);
2566
2567 err = hci_req_run(&req, inquiry_complete);
2568 if (err) {
2569 BT_ERR("Inquiry request failed: err %d", err);
2570 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2571 }
2572
2573 hci_dev_unlock(hdev);
2574 break;
2575 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002576}
2577
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002578static void le_scan_disable_work(struct work_struct *work)
2579{
2580 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002581 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002582 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002583 struct hci_request req;
2584 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002585
2586 BT_DBG("%s", hdev->name);
2587
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002588 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002589
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002590 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002591 cp.enable = LE_SCAN_DISABLE;
2592 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002593
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002594 err = hci_req_run(&req, le_scan_disable_work_complete);
2595 if (err)
2596 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002597}
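/* Flow summary (illustrative): for DISCOV_TYPE_INTERLEAVED the LE phase of
 * discovery ends here; le_scan_disable_work_complete() then chains the
 * BR/EDR inquiry (HCI_OP_INQUIRY) whose completion is reported through
 * inquiry_complete() above.
 */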
2598
David Herrmann9be0dab2012-04-22 14:39:57 +02002599/* Alloc HCI device */
2600struct hci_dev *hci_alloc_dev(void)
2601{
2602 struct hci_dev *hdev;
2603
2604 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2605 if (!hdev)
2606 return NULL;
2607
David Herrmannb1b813d2012-04-22 14:39:58 +02002608 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2609 hdev->esco_type = (ESCO_HV1);
2610 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002611 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2612 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002613 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2614 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002615
David Herrmannb1b813d2012-04-22 14:39:58 +02002616 hdev->sniff_max_interval = 800;
2617 hdev->sniff_min_interval = 80;
2618
Marcel Holtmannbef64732013-10-11 08:23:19 -07002619 hdev->le_scan_interval = 0x0060;
2620 hdev->le_scan_window = 0x0030;
2621
David Herrmannb1b813d2012-04-22 14:39:58 +02002622 mutex_init(&hdev->lock);
2623 mutex_init(&hdev->req_lock);
2624
2625 INIT_LIST_HEAD(&hdev->mgmt_pending);
2626 INIT_LIST_HEAD(&hdev->blacklist);
2627 INIT_LIST_HEAD(&hdev->uuids);
2628 INIT_LIST_HEAD(&hdev->link_keys);
2629 INIT_LIST_HEAD(&hdev->long_term_keys);
2630 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002631 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002632
2633 INIT_WORK(&hdev->rx_work, hci_rx_work);
2634 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2635 INIT_WORK(&hdev->tx_work, hci_tx_work);
2636 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002637
David Herrmannb1b813d2012-04-22 14:39:58 +02002638 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2639 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2640 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2641
David Herrmannb1b813d2012-04-22 14:39:58 +02002642 skb_queue_head_init(&hdev->rx_q);
2643 skb_queue_head_init(&hdev->cmd_q);
2644 skb_queue_head_init(&hdev->raw_q);
2645
2646 init_waitqueue_head(&hdev->req_wait_q);
2647
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002648 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002649
David Herrmannb1b813d2012-04-22 14:39:58 +02002650 hci_init_sysfs(hdev);
2651 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002652
2653 return hdev;
2654}
2655EXPORT_SYMBOL(hci_alloc_dev);
2656
2657/* Free HCI device */
2658void hci_free_dev(struct hci_dev *hdev)
2659{
David Herrmann9be0dab2012-04-22 14:39:57 +02002660 /* will free via device release */
2661 put_device(&hdev->dev);
2662}
2663EXPORT_SYMBOL(hci_free_dev);
2664
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665/* Register HCI device */
2666int hci_register_dev(struct hci_dev *hdev)
2667{
David Herrmannb1b813d2012-04-22 14:39:58 +02002668 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669
David Herrmann010666a2012-01-07 15:47:07 +01002670 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 return -EINVAL;
2672
Mat Martineau08add512011-11-02 16:18:36 -07002673 /* Do not allow HCI_AMP devices to register at index 0,
2674 * so the index can be used as the AMP controller ID.
2675 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002676 switch (hdev->dev_type) {
2677 case HCI_BREDR:
2678 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2679 break;
2680 case HCI_AMP:
2681 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2682 break;
2683 default:
2684 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002686
Sasha Levin3df92b32012-05-27 22:36:56 +02002687 if (id < 0)
2688 return id;
2689
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 sprintf(hdev->name, "hci%d", id);
2691 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002692
2693 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2694
Kees Cookd8537542013-07-03 15:04:57 -07002695 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2696 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002697 if (!hdev->workqueue) {
2698 error = -ENOMEM;
2699 goto err;
2700 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002701
Kees Cookd8537542013-07-03 15:04:57 -07002702 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2703 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002704 if (!hdev->req_workqueue) {
2705 destroy_workqueue(hdev->workqueue);
2706 error = -ENOMEM;
2707 goto err;
2708 }
2709
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002710 if (!IS_ERR_OR_NULL(bt_debugfs))
2711 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2712
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002713 dev_set_name(&hdev->dev, "%s", hdev->name);
2714
2715 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002716 if (error < 0)
2717 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002719 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002720 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2721 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002722 if (hdev->rfkill) {
2723 if (rfkill_register(hdev->rfkill) < 0) {
2724 rfkill_destroy(hdev->rfkill);
2725 hdev->rfkill = NULL;
2726 }
2727 }
2728
Johan Hedberg5e130362013-09-13 08:58:17 +03002729 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2730 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2731
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002732 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002733 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002734
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002735 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002736 /* Assume BR/EDR support until proven otherwise (such as
2737 * through reading supported features during init.
2738 */
2739 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2740 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002741
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002742 write_lock(&hci_dev_list_lock);
2743 list_add(&hdev->list, &hci_dev_list);
2744 write_unlock(&hci_dev_list_lock);
2745
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002747 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748
Johan Hedberg19202572013-01-14 22:33:51 +02002749 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002750
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002752
David Herrmann33ca9542011-10-08 14:58:49 +02002753err_wqueue:
2754 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002755 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002756err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002757 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002758
David Herrmann33ca9542011-10-08 14:58:49 +02002759 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760}
2761EXPORT_SYMBOL(hci_register_dev);
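/* Driver-side usage sketch (illustrative; example_open and example_close
 * are hypothetical transport callbacks): hci_register_dev() refuses to
 * register a device without open and close hooks, so a minimal probe path
 * looks roughly like this. Real drivers also wire up send/flush and pick
 * the right bus type.
 */
#if 0
static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;	/* mandatory */
	hdev->close = example_close;	/* mandatory */

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif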
2762
2763/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002764void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765{
Sasha Levin3df92b32012-05-27 22:36:56 +02002766 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002767
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002768 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769
Johan Hovold94324962012-03-15 14:48:41 +01002770 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2771
Sasha Levin3df92b32012-05-27 22:36:56 +02002772 id = hdev->id;
2773
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002774 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002776 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777
2778 hci_dev_do_close(hdev);
2779
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302780 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002781 kfree_skb(hdev->reassembly[i]);
2782
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002783 cancel_work_sync(&hdev->power_on);
2784
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002785 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002786 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002787 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002788 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002789 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002790 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002791
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002792 /* mgmt_index_removed should take care of emptying the
2793 * pending list */
2794 BUG_ON(!list_empty(&hdev->mgmt_pending));
2795
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 hci_notify(hdev, HCI_DEV_UNREG);
2797
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002798 if (hdev->rfkill) {
2799 rfkill_unregister(hdev->rfkill);
2800 rfkill_destroy(hdev->rfkill);
2801 }
2802
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002803 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002804
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002805 debugfs_remove_recursive(hdev->debugfs);
2806
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002807 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002808 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002809
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002810 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002811 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002812 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002813 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002814 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002815 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002816 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002817
David Herrmanndc946bd2012-01-07 15:47:24 +01002818 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002819
2820 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821}
2822EXPORT_SYMBOL(hci_unregister_dev);
2823
2824/* Suspend HCI device */
2825int hci_suspend_dev(struct hci_dev *hdev)
2826{
2827 hci_notify(hdev, HCI_DEV_SUSPEND);
2828 return 0;
2829}
2830EXPORT_SYMBOL(hci_suspend_dev);
2831
2832/* Resume HCI device */
2833int hci_resume_dev(struct hci_dev *hdev)
2834{
2835 hci_notify(hdev, HCI_DEV_RESUME);
2836 return 0;
2837}
2838EXPORT_SYMBOL(hci_resume_dev);
2839
Marcel Holtmann76bca882009-11-18 00:40:39 +01002840/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002841int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002842{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002843 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002844 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002845 kfree_skb(skb);
2846 return -ENXIO;
2847 }
2848
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002849 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002850 bt_cb(skb)->incoming = 1;
2851
2852 /* Time stamp */
2853 __net_timestamp(skb);
2854
Marcel Holtmann76bca882009-11-18 00:40:39 +01002855 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002856 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002857
Marcel Holtmann76bca882009-11-18 00:40:39 +01002858 return 0;
2859}
2860EXPORT_SYMBOL(hci_recv_frame);
2861
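/* Core packet reassembly helper (a descriptive sketch of the logic below).
 *
 * Appends up to @count bytes from @data to the partial packet kept in
 * hdev->reassembly[@index], allocating a fresh skb sized for the packet
 * type when no partial packet exists. Once the header has arrived, the
 * expected payload length is taken from it; a completed packet is handed
 * to hci_recv_frame() and the reassembly slot is cleared.
 *
 * Returns the number of bytes not yet consumed, or a negative error.
 */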
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302862static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002863 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302864{
2865 int len = 0;
2866 int hlen = 0;
2867 int remain = count;
2868 struct sk_buff *skb;
2869 struct bt_skb_cb *scb;
2870
2871 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002872 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302873 return -EILSEQ;
2874
2875 skb = hdev->reassembly[index];
2876
2877 if (!skb) {
2878 switch (type) {
2879 case HCI_ACLDATA_PKT:
2880 len = HCI_MAX_FRAME_SIZE;
2881 hlen = HCI_ACL_HDR_SIZE;
2882 break;
2883 case HCI_EVENT_PKT:
2884 len = HCI_MAX_EVENT_SIZE;
2885 hlen = HCI_EVENT_HDR_SIZE;
2886 break;
2887 case HCI_SCODATA_PKT:
2888 len = HCI_MAX_SCO_SIZE;
2889 hlen = HCI_SCO_HDR_SIZE;
2890 break;
2891 }
2892
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002893 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302894 if (!skb)
2895 return -ENOMEM;
2896
2897 scb = (void *) skb->cb;
2898 scb->expect = hlen;
2899 scb->pkt_type = type;
2900
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302901 hdev->reassembly[index] = skb;
2902 }
2903
2904 while (count) {
2905 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002906 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302907
2908 memcpy(skb_put(skb, len), data, len);
2909
2910 count -= len;
2911 data += len;
2912 scb->expect -= len;
2913 remain = count;
2914
2915 switch (type) {
2916 case HCI_EVENT_PKT:
2917 if (skb->len == HCI_EVENT_HDR_SIZE) {
2918 struct hci_event_hdr *h = hci_event_hdr(skb);
2919 scb->expect = h->plen;
2920
2921 if (skb_tailroom(skb) < scb->expect) {
2922 kfree_skb(skb);
2923 hdev->reassembly[index] = NULL;
2924 return -ENOMEM;
2925 }
2926 }
2927 break;
2928
2929 case HCI_ACLDATA_PKT:
2930 if (skb->len == HCI_ACL_HDR_SIZE) {
2931 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2932 scb->expect = __le16_to_cpu(h->dlen);
2933
2934 if (skb_tailroom(skb) < scb->expect) {
2935 kfree_skb(skb);
2936 hdev->reassembly[index] = NULL;
2937 return -ENOMEM;
2938 }
2939 }
2940 break;
2941
2942 case HCI_SCODATA_PKT:
2943 if (skb->len == HCI_SCO_HDR_SIZE) {
2944 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2945 scb->expect = h->dlen;
2946
2947 if (skb_tailroom(skb) < scb->expect) {
2948 kfree_skb(skb);
2949 hdev->reassembly[index] = NULL;
2950 return -ENOMEM;
2951 }
2952 }
2953 break;
2954 }
2955
2956 if (scb->expect == 0) {
2957 /* Complete frame */
2958
2959 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002960 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302961
2962 hdev->reassembly[index] = NULL;
2963 return remain;
2964 }
2965 }
2966
2967 return remain;
2968}
2969
Marcel Holtmannef222012007-07-11 06:42:04 +02002970int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2971{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302972 int rem = 0;
2973
Marcel Holtmannef222012007-07-11 06:42:04 +02002974 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2975 return -EILSEQ;
2976
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002977 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002978 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302979 if (rem < 0)
2980 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002981
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302982 data += (count - rem);
2983 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002984 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002985
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302986 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002987}
2988EXPORT_SYMBOL(hci_recv_fragment);
2989
Suraj Sumangala99811512010-07-14 13:02:19 +05302990#define STREAM_REASSEMBLY 0
2991
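/* Reassemble packets from a raw byte stream (such as UART based
 * transports) where each packet is prefixed with a one-byte packet type
 * indicator. A single slot (STREAM_REASSEMBLY) tracks the packet in
 * progress across calls.
 */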
2992int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2993{
2994 int type;
2995 int rem = 0;
2996
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002997 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302998 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2999
3000 if (!skb) {
3001 struct { char type; } *pkt;
3002
3003 /* Start of the frame */
3004 pkt = data;
3005 type = pkt->type;
3006
3007 data++;
3008 count--;
3009 } else
3010 type = bt_cb(skb)->pkt_type;
3011
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003012 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003013 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303014 if (rem < 0)
3015 return rem;
3016
3017 data += (count - rem);
3018 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003019 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303020
3021 return rem;
3022}
3023EXPORT_SYMBOL(hci_recv_stream_fragment);
3024
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025/* ---- Interface to upper protocols ---- */
3026
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027int hci_register_cb(struct hci_cb *cb)
3028{
3029 BT_DBG("%p name %s", cb, cb->name);
3030
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003031 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003033 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034
3035 return 0;
3036}
3037EXPORT_SYMBOL(hci_register_cb);
3038
3039int hci_unregister_cb(struct hci_cb *cb)
3040{
3041 BT_DBG("%p name %s", cb, cb->name);
3042
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003043 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003045 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046
3047 return 0;
3048}
3049EXPORT_SYMBOL(hci_unregister_cb);
3050
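/* Hand one outgoing frame to the driver.
 *
 * The skb is timestamped, a copy goes to the monitor socket (and to HCI
 * sockets when the device is in promiscuous mode), then the skb is
 * orphaned and passed to the driver's send callback.
 */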
Marcel Holtmann51086992013-10-10 14:54:19 -07003051static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003053 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003055 /* Time stamp */
3056 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003058 /* Send copy to monitor */
3059 hci_send_to_monitor(hdev, skb);
3060
3061 if (atomic_read(&hdev->promisc)) {
3062 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003063 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 }
3065
3066	/* Get rid of the skb owner prior to sending to the driver. */
3067 skb_orphan(skb);
3068
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003069 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003070 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071}
3072
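/* Asynchronous HCI request framework.
 *
 * Commands queued on a struct hci_request with hci_req_add() are spliced
 * onto hdev->cmd_q as one batch by hci_req_run(), which attaches the
 * complete callback to the last command. Minimal usage sketch (the
 * callback name is illustrative only):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 */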
Johan Hedberg3119ae92013-03-05 20:37:44 +02003073void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3074{
3075 skb_queue_head_init(&req->cmd_q);
3076 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003077 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003078}
3079
3080int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3081{
3082 struct hci_dev *hdev = req->hdev;
3083 struct sk_buff *skb;
3084 unsigned long flags;
3085
3086 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3087
Andre Guedes5d73e032013-03-08 11:20:16 -03003088	/* If an error occurred during request building, remove all HCI
3089 * commands queued on the HCI request queue.
3090 */
3091 if (req->err) {
3092 skb_queue_purge(&req->cmd_q);
3093 return req->err;
3094 }
3095
Johan Hedberg3119ae92013-03-05 20:37:44 +02003096 /* Do not allow empty requests */
3097 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003098 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003099
3100 skb = skb_peek_tail(&req->cmd_q);
3101 bt_cb(skb)->req.complete = complete;
3102
3103 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3104 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3105 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3106
3107 queue_work(hdev->workqueue, &hdev->cmd_work);
3108
3109 return 0;
3110}
3111
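/* Allocate and fill an skb carrying a single HCI command: a
 * hci_command_hdr (opcode plus parameter length) followed by @plen bytes
 * of parameters. Returns NULL on allocation failure.
 */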
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003112static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003113 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114{
3115 int len = HCI_COMMAND_HDR_SIZE + plen;
3116 struct hci_command_hdr *hdr;
3117 struct sk_buff *skb;
3118
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003120 if (!skb)
3121 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122
3123 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003124 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 hdr->plen = plen;
3126
3127 if (plen)
3128 memcpy(skb_put(skb, plen), param, plen);
3129
3130 BT_DBG("skb len %d", skb->len);
3131
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003132 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003133
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003134 return skb;
3135}
3136
3137/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003138int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3139 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003140{
3141 struct sk_buff *skb;
3142
3143 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3144
3145 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3146 if (!skb) {
3147 BT_ERR("%s no memory for command", hdev->name);
3148 return -ENOMEM;
3149 }
3150
Johan Hedberg11714b32013-03-05 20:37:47 +02003151	/* Stand-alone HCI commands must be flagged as
3152 * single-command requests.
3153 */
3154 bt_cb(skb)->req.start = true;
3155
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003157 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158
3159 return 0;
3160}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161
Johan Hedberg71c76a12013-03-05 20:37:46 +02003162/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003163void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3164 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003165{
3166 struct hci_dev *hdev = req->hdev;
3167 struct sk_buff *skb;
3168
3169 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3170
Andre Guedes34739c12013-03-08 11:20:18 -03003171	/* If an error occurred during request building, there is no point in
3172 * queueing the HCI command. We can simply return.
3173 */
3174 if (req->err)
3175 return;
3176
Johan Hedberg71c76a12013-03-05 20:37:46 +02003177 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3178 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003179 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3180 hdev->name, opcode);
3181 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003182 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003183 }
3184
3185 if (skb_queue_empty(&req->cmd_q))
3186 bt_cb(skb)->req.start = true;
3187
Johan Hedberg02350a72013-04-03 21:50:29 +03003188 bt_cb(skb)->req.event = event;
3189
Johan Hedberg71c76a12013-03-05 20:37:46 +02003190 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003191}
3192
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003193void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3194 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003195{
3196 hci_req_add_ev(req, opcode, plen, param, 0);
3197}
3198
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003200void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201{
3202 struct hci_command_hdr *hdr;
3203
3204 if (!hdev->sent_cmd)
3205 return NULL;
3206
3207 hdr = (void *) hdev->sent_cmd->data;
3208
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003209 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 return NULL;
3211
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003212 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213
3214 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3215}
3216
3217/* Send ACL data */
3218static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3219{
3220 struct hci_acl_hdr *hdr;
3221 int len = skb->len;
3222
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003223 skb_push(skb, HCI_ACL_HDR_SIZE);
3224 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003225 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003226 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3227 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228}
3229
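/* Queue ACL data for transmission: the first fragment keeps the caller's
 * flags (with the handle taken from the connection on BR/EDR or from the
 * channel on AMP), while fragments from the frag_list are re-flagged as
 * ACL_CONT and queued atomically under the queue lock.
 */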
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003230static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003231 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003233 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 struct hci_dev *hdev = conn->hdev;
3235 struct sk_buff *list;
3236
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003237 skb->len = skb_headlen(skb);
3238 skb->data_len = 0;
3239
3240 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003241
3242 switch (hdev->dev_type) {
3243 case HCI_BREDR:
3244 hci_add_acl_hdr(skb, conn->handle, flags);
3245 break;
3246 case HCI_AMP:
3247 hci_add_acl_hdr(skb, chan->handle, flags);
3248 break;
3249 default:
3250 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3251 return;
3252 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003253
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003254 list = skb_shinfo(skb)->frag_list;
3255 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256		/* Non-fragmented */
3257 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3258
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003259 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 } else {
3261 /* Fragmented */
3262 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3263
3264 skb_shinfo(skb)->frag_list = NULL;
3265
3266 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003267 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003269 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003270
3271 flags &= ~ACL_START;
3272 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273 do {
3274 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003275
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003276 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003277 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278
3279 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3280
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003281 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 } while (list);
3283
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003284 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003286}
3287
3288void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3289{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003290 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003291
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003292 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003293
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003294 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003296 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298
3299/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003300void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301{
3302 struct hci_dev *hdev = conn->hdev;
3303 struct hci_sco_hdr hdr;
3304
3305 BT_DBG("%s len %d", hdev->name, skb->len);
3306
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003307 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308 hdr.dlen = skb->len;
3309
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003310 skb_push(skb, HCI_SCO_HDR_SIZE);
3311 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003312 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003314 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003315
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003317 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319
3320/* ---- HCI TX task (outgoing data) ---- */
3321
3322/* HCI Connection scheduler */
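/* Pick the connection of the given type with the smallest 'sent' count
 * among those with queued data, and derive a fair packet quota for it
 * from the controller's free buffer count.
 */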
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003323static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3324 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325{
3326 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003327 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003328 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003330 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003332
3333 rcu_read_lock();
3334
3335 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003336 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003338
3339 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3340 continue;
3341
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 num++;
3343
3344 if (c->sent < min) {
3345 min = c->sent;
3346 conn = c;
3347 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003348
3349 if (hci_conn_num(hdev, type) == num)
3350 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 }
3352
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003353 rcu_read_unlock();
3354
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003356 int cnt, q;
3357
3358 switch (conn->type) {
3359 case ACL_LINK:
3360 cnt = hdev->acl_cnt;
3361 break;
3362 case SCO_LINK:
3363 case ESCO_LINK:
3364 cnt = hdev->sco_cnt;
3365 break;
3366 case LE_LINK:
3367 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3368 break;
3369 default:
3370 cnt = 0;
3371 BT_ERR("Unknown link type");
3372 }
3373
3374 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375 *quote = q ? q : 1;
3376 } else
3377 *quote = 0;
3378
3379 BT_DBG("conn %p quote %d", conn, *quote);
3380 return conn;
3381}
3382
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003383static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384{
3385 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003386 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387
Ville Tervobae1f5d92011-02-10 22:38:53 -03003388 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003390 rcu_read_lock();
3391
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003393 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003394 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003395 BT_ERR("%s killing stalled connection %pMR",
3396 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003397 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 }
3399 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003400
3401 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402}
3403
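/* Channel-level scheduler: among connections of the given type, pick the
 * channel whose queued skb has the highest priority, breaking ties in
 * favour of the connection with the least data outstanding, and derive
 * its quota from the matching buffer count.
 */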
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003404static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3405 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003406{
3407 struct hci_conn_hash *h = &hdev->conn_hash;
3408 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003409 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003410 struct hci_conn *conn;
3411 int cnt, q, conn_num = 0;
3412
3413 BT_DBG("%s", hdev->name);
3414
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003415 rcu_read_lock();
3416
3417 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003418 struct hci_chan *tmp;
3419
3420 if (conn->type != type)
3421 continue;
3422
3423 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3424 continue;
3425
3426 conn_num++;
3427
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003428 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003429 struct sk_buff *skb;
3430
3431 if (skb_queue_empty(&tmp->data_q))
3432 continue;
3433
3434 skb = skb_peek(&tmp->data_q);
3435 if (skb->priority < cur_prio)
3436 continue;
3437
3438 if (skb->priority > cur_prio) {
3439 num = 0;
3440 min = ~0;
3441 cur_prio = skb->priority;
3442 }
3443
3444 num++;
3445
3446 if (conn->sent < min) {
3447 min = conn->sent;
3448 chan = tmp;
3449 }
3450 }
3451
3452 if (hci_conn_num(hdev, type) == conn_num)
3453 break;
3454 }
3455
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003456 rcu_read_unlock();
3457
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003458 if (!chan)
3459 return NULL;
3460
3461 switch (chan->conn->type) {
3462 case ACL_LINK:
3463 cnt = hdev->acl_cnt;
3464 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003465 case AMP_LINK:
3466 cnt = hdev->block_cnt;
3467 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003468 case SCO_LINK:
3469 case ESCO_LINK:
3470 cnt = hdev->sco_cnt;
3471 break;
3472 case LE_LINK:
3473 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3474 break;
3475 default:
3476 cnt = 0;
3477 BT_ERR("Unknown link type");
3478 }
3479
3480 q = cnt / num;
3481 *quote = q ? q : 1;
3482 BT_DBG("chan %p quote %d", chan, *quote);
3483 return chan;
3484}
3485
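/* Anti-starvation pass: channels that were serviced in the last round
 * get their 'sent' count reset, while the head skb of every queue that
 * saw no service is promoted towards HCI_PRIO_MAX - 1 so low-priority
 * channels eventually get scheduled.
 */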
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003486static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3487{
3488 struct hci_conn_hash *h = &hdev->conn_hash;
3489 struct hci_conn *conn;
3490 int num = 0;
3491
3492 BT_DBG("%s", hdev->name);
3493
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003494 rcu_read_lock();
3495
3496 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003497 struct hci_chan *chan;
3498
3499 if (conn->type != type)
3500 continue;
3501
3502 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3503 continue;
3504
3505 num++;
3506
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003507 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003508 struct sk_buff *skb;
3509
3510 if (chan->sent) {
3511 chan->sent = 0;
3512 continue;
3513 }
3514
3515 if (skb_queue_empty(&chan->data_q))
3516 continue;
3517
3518 skb = skb_peek(&chan->data_q);
3519 if (skb->priority >= HCI_PRIO_MAX - 1)
3520 continue;
3521
3522 skb->priority = HCI_PRIO_MAX - 1;
3523
3524 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003525 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003526 }
3527
3528 if (hci_conn_num(hdev, type) == num)
3529 break;
3530 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003531
3532 rcu_read_unlock();
3533
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003534}
3535
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003536static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3537{
3538 /* Calculate count of blocks used by this packet */
3539 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3540}
3541
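/* If the controller has returned no ACL buffers for longer than
 * HCI_ACL_TX_TIMEOUT, assume the link is stalled and tear down
 * connections with unacknowledged data (see hci_link_tx_to()).
 */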
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003542static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 if (!test_bit(HCI_RAW, &hdev->flags)) {
3545		/* ACL tx timeout must be longer than the maximum
3546 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003547 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003548 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003549 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003551}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003553static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003554{
3555 unsigned int cnt = hdev->acl_cnt;
3556 struct hci_chan *chan;
3557 struct sk_buff *skb;
3558 int quote;
3559
3560 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003561
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003562 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003563 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003564 u32 priority = (skb_peek(&chan->data_q))->priority;
3565 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003566 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003567 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003568
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003569 /* Stop if priority has changed */
3570 if (skb->priority < priority)
3571 break;
3572
3573 skb = skb_dequeue(&chan->data_q);
3574
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003575 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003576 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003577
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003578 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579 hdev->acl_last_tx = jiffies;
3580
3581 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003582 chan->sent++;
3583 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 }
3585 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003586
3587 if (cnt != hdev->acl_cnt)
3588 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589}
3590
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003591static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003592{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003593 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003594 struct hci_chan *chan;
3595 struct sk_buff *skb;
3596 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003597 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003598
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003599 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003600
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003601 BT_DBG("%s", hdev->name);
3602
3603 if (hdev->dev_type == HCI_AMP)
3604 type = AMP_LINK;
3605 else
3606 type = ACL_LINK;
3607
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003608 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003609 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003610 u32 priority = (skb_peek(&chan->data_q))->priority;
3611 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3612 int blocks;
3613
3614 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003615 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003616
3617 /* Stop if priority has changed */
3618 if (skb->priority < priority)
3619 break;
3620
3621 skb = skb_dequeue(&chan->data_q);
3622
3623 blocks = __get_blocks(hdev, skb);
3624 if (blocks > hdev->block_cnt)
3625 return;
3626
3627 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003628 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003629
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003630 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003631 hdev->acl_last_tx = jiffies;
3632
3633 hdev->block_cnt -= blocks;
3634 quote -= blocks;
3635
3636 chan->sent += blocks;
3637 chan->conn->sent += blocks;
3638 }
3639 }
3640
3641 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003642 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003643}
3644
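/* Entry point for ACL scheduling: pick the packet-based or block-based
 * scheduler according to the controller's flow control mode.
 */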
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003645static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003646{
3647 BT_DBG("%s", hdev->name);
3648
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003649 /* No ACL link over BR/EDR controller */
3650 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3651 return;
3652
3653 /* No AMP link over AMP controller */
3654 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003655 return;
3656
3657 switch (hdev->flow_ctl_mode) {
3658 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3659 hci_sched_acl_pkt(hdev);
3660 break;
3661
3662 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3663 hci_sched_acl_blk(hdev);
3664 break;
3665 }
3666}
3667
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003669static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670{
3671 struct hci_conn *conn;
3672 struct sk_buff *skb;
3673 int quote;
3674
3675 BT_DBG("%s", hdev->name);
3676
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003677 if (!hci_conn_num(hdev, SCO_LINK))
3678 return;
3679
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3681 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3682 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003683 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684
3685 conn->sent++;
3686 if (conn->sent == ~0)
3687 conn->sent = 0;
3688 }
3689 }
3690}
3691
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003692static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003693{
3694 struct hci_conn *conn;
3695 struct sk_buff *skb;
3696 int quote;
3697
3698 BT_DBG("%s", hdev->name);
3699
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003700 if (!hci_conn_num(hdev, ESCO_LINK))
3701 return;
3702
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003703 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3704 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003705 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3706 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003707 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003708
3709 conn->sent++;
3710 if (conn->sent == ~0)
3711 conn->sent = 0;
3712 }
3713 }
3714}
3715
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003716static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003717{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003718 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003719 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003720 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003721
3722 BT_DBG("%s", hdev->name);
3723
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003724 if (!hci_conn_num(hdev, LE_LINK))
3725 return;
3726
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003727 if (!test_bit(HCI_RAW, &hdev->flags)) {
3728		/* LE tx timeout must be longer than the maximum
3729 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003730 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003731 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003732 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003733 }
3734
3735 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003736 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003737 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003738 u32 priority = (skb_peek(&chan->data_q))->priority;
3739 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003740 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003741 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003742
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003743 /* Stop if priority has changed */
3744 if (skb->priority < priority)
3745 break;
3746
3747 skb = skb_dequeue(&chan->data_q);
3748
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003749 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003750 hdev->le_last_tx = jiffies;
3751
3752 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003753 chan->sent++;
3754 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003755 }
3756 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003757
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003758 if (hdev->le_pkts)
3759 hdev->le_cnt = cnt;
3760 else
3761 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003762
3763 if (cnt != tmp)
3764 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003765}
3766
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003767static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003769 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 struct sk_buff *skb;
3771
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003772 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003773 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774
Marcel Holtmann52de5992013-09-03 18:08:38 -07003775 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3776 /* Schedule queues and send stuff to HCI driver */
3777 hci_sched_acl(hdev);
3778 hci_sched_sco(hdev);
3779 hci_sched_esco(hdev);
3780 hci_sched_le(hdev);
3781 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003782
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 /* Send next queued raw (unknown type) packet */
3784 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003785 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786}
3787
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003788/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789
3790/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003791static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792{
3793 struct hci_acl_hdr *hdr = (void *) skb->data;
3794 struct hci_conn *conn;
3795 __u16 handle, flags;
3796
3797 skb_pull(skb, HCI_ACL_HDR_SIZE);
3798
3799 handle = __le16_to_cpu(hdr->handle);
3800 flags = hci_flags(handle);
3801 handle = hci_handle(handle);
3802
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003803 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003804 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805
3806 hdev->stat.acl_rx++;
3807
3808 hci_dev_lock(hdev);
3809 conn = hci_conn_hash_lookup_handle(hdev, handle);
3810 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003811
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003813 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003814
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003816 l2cap_recv_acldata(conn, skb, flags);
3817 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003819 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003820 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821 }
3822
3823 kfree_skb(skb);
3824}
3825
3826/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003827static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828{
3829 struct hci_sco_hdr *hdr = (void *) skb->data;
3830 struct hci_conn *conn;
3831 __u16 handle;
3832
3833 skb_pull(skb, HCI_SCO_HDR_SIZE);
3834
3835 handle = __le16_to_cpu(hdr->handle);
3836
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003837 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
3839 hdev->stat.sco_rx++;
3840
3841 hci_dev_lock(hdev);
3842 conn = hci_conn_hash_lookup_handle(hdev, handle);
3843 hci_dev_unlock(hdev);
3844
3845 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003847 sco_recv_scodata(conn, skb);
3848 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003850 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003851 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852 }
3853
3854 kfree_skb(skb);
3855}
3856
Johan Hedberg9238f362013-03-05 20:37:48 +02003857static bool hci_req_is_complete(struct hci_dev *hdev)
3858{
3859 struct sk_buff *skb;
3860
3861 skb = skb_peek(&hdev->cmd_q);
3862 if (!skb)
3863 return true;
3864
3865 return bt_cb(skb)->req.start;
3866}
3867
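/* Requeue a clone of the last sent command at the head of the command
 * queue, unless it was HCI_Reset. Used to recover from controllers that
 * lose a pending command across a spontaneous reset.
 */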
Johan Hedberg42c6b122013-03-05 20:37:49 +02003868static void hci_resend_last(struct hci_dev *hdev)
3869{
3870 struct hci_command_hdr *sent;
3871 struct sk_buff *skb;
3872 u16 opcode;
3873
3874 if (!hdev->sent_cmd)
3875 return;
3876
3877 sent = (void *) hdev->sent_cmd->data;
3878 opcode = __le16_to_cpu(sent->opcode);
3879 if (opcode == HCI_OP_RESET)
3880 return;
3881
3882 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3883 if (!skb)
3884 return;
3885
3886 skb_queue_head(&hdev->cmd_q, skb);
3887 queue_work(hdev->workqueue, &hdev->cmd_work);
3888}
3889
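/* Handle completion of the currently pending command: if it ends the
 * active request (a failure, or no further queued commands belong to
 * the request), drop the request's remaining commands and invoke its
 * complete callback exactly once.
 */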
Johan Hedberg9238f362013-03-05 20:37:48 +02003890void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3891{
3892 hci_req_complete_t req_complete = NULL;
3893 struct sk_buff *skb;
3894 unsigned long flags;
3895
3896 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3897
Johan Hedberg42c6b122013-03-05 20:37:49 +02003898 /* If the completed command doesn't match the last one that was
3899	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003900 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003901 if (!hci_sent_cmd_data(hdev, opcode)) {
3902 /* Some CSR based controllers generate a spontaneous
3903 * reset complete event during init and any pending
3904 * command will never be completed. In such a case we
3905 * need to resend whatever was the last sent
3906 * command.
3907 */
3908 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3909 hci_resend_last(hdev);
3910
Johan Hedberg9238f362013-03-05 20:37:48 +02003911 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003912 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003913
3914	/* If the command succeeded and there are still more commands in
3915	 * this request, the request is not yet complete.
3916 */
3917 if (!status && !hci_req_is_complete(hdev))
3918 return;
3919
3920	/* If this was the last command in a request, the complete
3921 * callback would be found in hdev->sent_cmd instead of the
3922 * command queue (hdev->cmd_q).
3923 */
3924 if (hdev->sent_cmd) {
3925 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003926
3927 if (req_complete) {
3928 /* We must set the complete callback to NULL to
3929 * avoid calling the callback more than once if
3930 * this function gets called again.
3931 */
3932 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3933
Johan Hedberg9238f362013-03-05 20:37:48 +02003934 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003935 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003936 }
3937
3938 /* Remove all pending commands belonging to this request */
3939 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3940 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3941 if (bt_cb(skb)->req.start) {
3942 __skb_queue_head(&hdev->cmd_q, skb);
3943 break;
3944 }
3945
3946 req_complete = bt_cb(skb)->req.complete;
3947 kfree_skb(skb);
3948 }
3949 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3950
3951call_complete:
3952 if (req_complete)
3953 req_complete(hdev, status);
3954}
3955
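/* RX work: drain hdev->rx_q. Each frame is mirrored to the monitor
 * socket and, in promiscuous mode, to HCI sockets. Frames are dropped
 * entirely in raw or user-channel mode; data packets are also dropped
 * while HCI_INIT is set. Everything else is dispatched by packet type
 * to the event, ACL data or SCO data handlers.
 */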
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003956static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003958 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959 struct sk_buff *skb;
3960
3961 BT_DBG("%s", hdev->name);
3962
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003964 /* Send copy to monitor */
3965 hci_send_to_monitor(hdev, skb);
3966
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967 if (atomic_read(&hdev->promisc)) {
3968 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003969 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 }
3971
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003972 if (test_bit(HCI_RAW, &hdev->flags) ||
3973 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 kfree_skb(skb);
3975 continue;
3976 }
3977
3978 if (test_bit(HCI_INIT, &hdev->flags)) {
3979			/* Don't process data packets in these states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003980 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 case HCI_ACLDATA_PKT:
3982 case HCI_SCODATA_PKT:
3983 kfree_skb(skb);
3984 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003985 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986 }
3987
3988 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003989 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003991 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 hci_event_packet(hdev, skb);
3993 break;
3994
3995 case HCI_ACLDATA_PKT:
3996 BT_DBG("%s ACL data packet", hdev->name);
3997 hci_acldata_packet(hdev, skb);
3998 break;
3999
4000 case HCI_SCODATA_PKT:
4001 BT_DBG("%s SCO data packet", hdev->name);
4002 hci_scodata_packet(hdev, skb);
4003 break;
4004
4005 default:
4006 kfree_skb(skb);
4007 break;
4008 }
4009 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010}
4011
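/* Command work: when the controller has a free command slot (cmd_cnt),
 * dequeue the next queued command, keep a clone in hdev->sent_cmd for
 * the completion handler, send it, and arm the command timeout unless
 * a reset is in progress.
 */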
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004012static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004013{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004014 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 struct sk_buff *skb;
4016
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004017 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4018 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004021 if (atomic_read(&hdev->cmd_cnt)) {
4022 skb = skb_dequeue(&hdev->cmd_q);
4023 if (!skb)
4024 return;
4025
Wei Yongjun7585b972009-02-25 18:29:52 +08004026 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004028 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004029 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004031 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004032 if (test_bit(HCI_RESET, &hdev->flags))
4033 del_timer(&hdev->cmd_timer);
4034 else
4035 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004036 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037 } else {
4038 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004039 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040 }
4041 }
4042}