/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

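/* Each read-only entry below follows the same seq_file pattern: a *_show()
 * callback prints the state under hci_dev_lock(), a *_open() wrapper hands
 * it to single_open() together with the hci_dev from inode->i_private, and
 * a const file_operations structure ties the two together.
 */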
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid);
		data4 = get_unaligned_le16(uuid + 4);
		data3 = get_unaligned_le16(uuid + 6);
		data2 = get_unaligned_le16(uuid + 8);
		data1 = get_unaligned_le16(uuid + 10);
		data0 = get_unaligned_le32(uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

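/* The numeric tunables below use a related pattern: DEFINE_SIMPLE_ATTRIBUTE()
 * generates the file_operations from a u64 get/set callback pair plus a
 * printf format. A NULL setter, as used for voice_setting above, makes the
 * entry read-only.
 */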
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

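/* The sniff interval setters below enforce the constraints visible in the
 * code: a value must be non-zero and even, and the pair must keep
 * sniff_min_interval <= sniff_max_interval at all times.
 */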
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

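/* Synchronous requests park the caller on req_wait_q with req_status set
 * to HCI_REQ_PEND; the two helpers above flip the status to HCI_REQ_DONE
 * or HCI_REQ_CANCELED and wake the waiter up again.
 */
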
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

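/* Send a single HCI command, sleep until it completes or the timeout
 * expires, and return the resulting event skb to the caller. A non-zero
 * event argument waits for that specific event instead of the usual
 * Command Complete.
 */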
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

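/* A hypothetical caller sketch (not taken from this file): issue Read
 * BD_ADDR, wait for its Command Complete event and release the skb:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */
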
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

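/* The values above correspond to the Write Inquiry Mode command: 0x00 is
 * standard inquiry results, 0x01 adds RSSI and 0x02 selects extended
 * inquiry results. The manufacturer/revision checks appear to whitelist
 * specific controllers that handle RSSI despite not advertising it in
 * their feature bits.
 */
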
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

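/* Controller bring-up runs the four request stages above in order: init1
 * resets the controller and reads the basic identity, init2 configures
 * BR/EDR and LE basics plus the event mask, init3 covers link policy and
 * extended feature pages, and init4 handles the page-2 event mask and
 * Synchronization Train parameters.
 */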
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The HCI_BREDR device type covers single-mode LE, single-mode
	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
	 * only need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

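/* The inquiry cache keeps every discovered device on cache->all; entries
 * with unresolved names are additionally threaded onto cache->unknown, and
 * entries queued for a remote name request onto cache->resolve.
 */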
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

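/* Merge a fresh inquiry result into the cache. The return value tells the
 * caller whether the remote name can be considered known; false means a
 * remote name request is still needed for this device.
 */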
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001427int hci_inquiry(void __user *arg)
1428{
1429 __u8 __user *ptr = arg;
1430 struct hci_inquiry_req ir;
1431 struct hci_dev *hdev;
1432 int err = 0, do_inquiry = 0, max_rsp;
1433 long timeo;
1434 __u8 *buf;
1435
1436 if (copy_from_user(&ir, ptr, sizeof(ir)))
1437 return -EFAULT;
1438
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001439 hdev = hci_dev_get(ir.dev_id);
1440 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 return -ENODEV;
1442
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001443 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1444 err = -EBUSY;
1445 goto done;
1446 }
1447
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001448 if (hdev->dev_type != HCI_BREDR) {
1449 err = -EOPNOTSUPP;
1450 goto done;
1451 }
1452
Johan Hedberg56f87902013-10-02 13:43:13 +03001453 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1454 err = -EOPNOTSUPP;
1455 goto done;
1456 }
1457
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001458 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001459 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001460 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001461 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 do_inquiry = 1;
1463 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001464 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465
Marcel Holtmann04837f62006-07-03 10:02:33 +02001466 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001467
1468 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001469 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1470 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001471 if (err < 0)
1472 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001473
1474 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1475 * cleared). If it is interrupted by a signal, return -EINTR.
1476 */
1477 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1478 TASK_INTERRUPTIBLE))
1479 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001480 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001482	/* For an unlimited number of responses, use a buffer with
1483	 * 255 entries.
1484	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1486
1487	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1488	 * and then copy it to user space.
1489	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001490 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001491 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 err = -ENOMEM;
1493 goto done;
1494 }
1495
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001496 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001498 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499
1500 BT_DBG("num_rsp %d", ir.num_rsp);
1501
1502 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1503 ptr += sizeof(ir);
1504 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001505 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001507	} else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508		err = -EFAULT;
	}
1509
1510 kfree(buf);
1511
1512done:
1513 hci_dev_put(hdev);
1514 return err;
1515}
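/* Userspace sketch of driving the HCIINQUIRY ioctl handled above. The
 * structures and constants are the ones exported through the BlueZ
 * headers; the surrounding code is illustrative, not taken from BlueZ.
 *
 *	#include <sys/ioctl.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },   // GIAC
 *			.length  = 8,                      // 8 * 1.28s
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */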
1516
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001517static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 int ret = 0;
1520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 BT_DBG("%s %p", hdev->name, hdev);
1522
1523 hci_req_lock(hdev);
1524
Johan Hovold94324962012-03-15 14:48:41 +01001525 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1526 ret = -ENODEV;
1527 goto done;
1528 }
1529
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001530 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1531 /* Check for rfkill but allow the HCI setup stage to
1532 * proceed (which in itself doesn't cause any RF activity).
1533 */
1534 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1535 ret = -ERFKILL;
1536 goto done;
1537 }
1538
1539	/* Check for a valid public address or a configured static
1540	 * random address, but let the HCI setup proceed to
1541 * be able to determine if there is a public address
1542 * or not.
1543 *
1544 * This check is only valid for BR/EDR controllers
1545 * since AMP controllers do not have an address.
1546 */
1547 if (hdev->dev_type == HCI_BREDR &&
1548 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1549 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1550 ret = -EADDRNOTAVAIL;
1551 goto done;
1552 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001553 }
1554
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 if (test_bit(HCI_UP, &hdev->flags)) {
1556 ret = -EALREADY;
1557 goto done;
1558 }
1559
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 if (hdev->open(hdev)) {
1561 ret = -EIO;
1562 goto done;
1563 }
1564
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001565 atomic_set(&hdev->cmd_cnt, 1);
1566 set_bit(HCI_INIT, &hdev->flags);
1567
1568 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1569 ret = hdev->setup(hdev);
1570
1571 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001572 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1573 set_bit(HCI_RAW, &hdev->flags);
1574
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001575 if (!test_bit(HCI_RAW, &hdev->flags) &&
1576 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001577 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 }
1579
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001580 clear_bit(HCI_INIT, &hdev->flags);
1581
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 if (!ret) {
1583 hci_dev_hold(hdev);
1584 set_bit(HCI_UP, &hdev->flags);
1585 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001586 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001587 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001588 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001589 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001590 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001591 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001592 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001593 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001595 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001596 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001597 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599 skb_queue_purge(&hdev->cmd_q);
1600 skb_queue_purge(&hdev->rx_q);
1601
1602 if (hdev->flush)
1603 hdev->flush(hdev);
1604
1605 if (hdev->sent_cmd) {
1606 kfree_skb(hdev->sent_cmd);
1607 hdev->sent_cmd = NULL;
1608 }
1609
1610 hdev->close(hdev);
1611 hdev->flags = 0;
1612 }
1613
1614done:
1615 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 return ret;
1617}
1618
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001619/* ---- HCI ioctl helpers ---- */
1620
1621int hci_dev_open(__u16 dev)
1622{
1623 struct hci_dev *hdev;
1624 int err;
1625
1626 hdev = hci_dev_get(dev);
1627 if (!hdev)
1628 return -ENODEV;
1629
Johan Hedberge1d08f42013-10-01 22:44:50 +03001630 /* We need to ensure that no other power on/off work is pending
1631 * before proceeding to call hci_dev_do_open. This is
1632 * particularly important if the setup procedure has not yet
1633 * completed.
1634 */
1635 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1636 cancel_delayed_work(&hdev->power_off);
1637
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001638 /* After this call it is guaranteed that the setup procedure
1639 * has finished. This means that error conditions like RFKILL
1640 * or no valid public or static random address apply.
1641 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001642 flush_workqueue(hdev->req_workqueue);
1643
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001644 err = hci_dev_do_open(hdev);
1645
1646 hci_dev_put(hdev);
1647
1648 return err;
1649}
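/* Matching userspace sketch, assuming the standard BlueZ conventions:
 * HCIDEVUP on a raw HCI control socket reaches this function via
 * hci_sock.c. Error handling is trimmed for illustration.
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(dd, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
 *		perror("hci up");
 */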
1650
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651static int hci_dev_do_close(struct hci_dev *hdev)
1652{
1653 BT_DBG("%s %p", hdev->name, hdev);
1654
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001655 cancel_delayed_work(&hdev->power_off);
1656
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 hci_req_cancel(hdev, ENODEV);
1658 hci_req_lock(hdev);
1659
1660 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001661 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 hci_req_unlock(hdev);
1663 return 0;
1664 }
1665
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001666 /* Flush RX and TX works */
1667 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001668 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001670 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001671 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001672 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001673 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001674 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001675 }
1676
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001677 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001678 cancel_delayed_work(&hdev->service_cache);
1679
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001680 cancel_delayed_work_sync(&hdev->le_scan_disable);
1681
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001682 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001683 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001685 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
1687 hci_notify(hdev, HCI_DEV_DOWN);
1688
1689 if (hdev->flush)
1690 hdev->flush(hdev);
1691
1692 /* Reset device */
1693 skb_queue_purge(&hdev->cmd_q);
1694 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001695 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001696 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001697 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001699 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 clear_bit(HCI_INIT, &hdev->flags);
1701 }
1702
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001703 /* flush cmd work */
1704 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705
1706 /* Drop queues */
1707 skb_queue_purge(&hdev->rx_q);
1708 skb_queue_purge(&hdev->cmd_q);
1709 skb_queue_purge(&hdev->raw_q);
1710
1711 /* Drop last sent command */
1712 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001713 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 kfree_skb(hdev->sent_cmd);
1715 hdev->sent_cmd = NULL;
1716 }
1717
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001718 kfree_skb(hdev->recv_evt);
1719 hdev->recv_evt = NULL;
1720
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721	/* After this point our queues are empty
1722	 * and no tasks are scheduled.
	 */
1723 hdev->close(hdev);
1724
Johan Hedberg35b973c2013-03-15 17:06:59 -05001725 /* Clear flags */
1726 hdev->flags = 0;
1727 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1728
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001729 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1730 if (hdev->dev_type == HCI_BREDR) {
1731 hci_dev_lock(hdev);
1732 mgmt_powered(hdev, 0);
1733 hci_dev_unlock(hdev);
1734 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001735 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001736
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001737 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001738 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001739
Johan Hedberge59fda82012-02-22 18:11:53 +02001740 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001741 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 hci_req_unlock(hdev);
1744
1745 hci_dev_put(hdev);
1746 return 0;
1747}
1748
1749int hci_dev_close(__u16 dev)
1750{
1751 struct hci_dev *hdev;
1752 int err;
1753
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001754 hdev = hci_dev_get(dev);
1755 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001757
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001758 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1759 err = -EBUSY;
1760 goto done;
1761 }
1762
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001763 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1764 cancel_delayed_work(&hdev->power_off);
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001767
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001768done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 hci_dev_put(hdev);
1770 return err;
1771}
1772
1773int hci_dev_reset(__u16 dev)
1774{
1775 struct hci_dev *hdev;
1776 int ret = 0;
1777
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001778 hdev = hci_dev_get(dev);
1779 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 return -ENODEV;
1781
1782 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Marcel Holtmann808a0492013-08-26 20:57:58 -07001784 if (!test_bit(HCI_UP, &hdev->flags)) {
1785 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001787 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001789 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1790 ret = -EBUSY;
1791 goto done;
1792 }
1793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 /* Drop queues */
1795 skb_queue_purge(&hdev->rx_q);
1796 skb_queue_purge(&hdev->cmd_q);
1797
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001798 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001799 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001801 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802
1803 if (hdev->flush)
1804 hdev->flush(hdev);
1805
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001806 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001807	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001810 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
1812done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 hci_req_unlock(hdev);
1814 hci_dev_put(hdev);
1815 return ret;
1816}
1817
1818int hci_dev_reset_stat(__u16 dev)
1819{
1820 struct hci_dev *hdev;
1821 int ret = 0;
1822
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001823 hdev = hci_dev_get(dev);
1824 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 return -ENODEV;
1826
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001827 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1828 ret = -EBUSY;
1829 goto done;
1830 }
1831
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1833
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001834done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 return ret;
1837}
1838
1839int hci_dev_cmd(unsigned int cmd, void __user *arg)
1840{
1841 struct hci_dev *hdev;
1842 struct hci_dev_req dr;
1843 int err = 0;
1844
1845 if (copy_from_user(&dr, arg, sizeof(dr)))
1846 return -EFAULT;
1847
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001848 hdev = hci_dev_get(dr.dev_id);
1849 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 return -ENODEV;
1851
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001852 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1853 err = -EBUSY;
1854 goto done;
1855 }
1856
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001857 if (hdev->dev_type != HCI_BREDR) {
1858 err = -EOPNOTSUPP;
1859 goto done;
1860 }
1861
Johan Hedberg56f87902013-10-02 13:43:13 +03001862 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1863 err = -EOPNOTSUPP;
1864 goto done;
1865 }
1866
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 switch (cmd) {
1868 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001869 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1870 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 break;
1872
1873 case HCISETENCRYPT:
1874 if (!lmp_encrypt_capable(hdev)) {
1875 err = -EOPNOTSUPP;
1876 break;
1877 }
1878
1879 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1880 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001881 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1882 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 if (err)
1884 break;
1885 }
1886
Johan Hedberg01178cd2013-03-05 20:37:41 +02001887 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1888 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 break;
1890
1891 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001892 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1893 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 break;
1895
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001896 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001897 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1898 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001899 break;
1900
1901 case HCISETLINKMODE:
1902 hdev->link_mode = ((__u16) dr.dev_opt) &
1903 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1904 break;
1905
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 case HCISETPTYPE:
1907 hdev->pkt_type = (__u16) dr.dev_opt;
1908 break;
1909
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001911 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1912 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 break;
1914
1915 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001916 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1917 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 break;
1919
1920 default:
1921 err = -EINVAL;
1922 break;
1923 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001924
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001925done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 hci_dev_put(hdev);
1927 return err;
1928}
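/* Example of one command dispatched above, sketched after the way
 * hciconfig's "piscan" mode uses it (the SCAN_* constants come from the
 * BlueZ headers; dd is a raw HCI socket as in the earlier sketches):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */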
1929
1930int hci_get_dev_list(void __user *arg)
1931{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001932 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 struct hci_dev_list_req *dl;
1934 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 int n = 0, size, err;
1936 __u16 dev_num;
1937
1938 if (get_user(dev_num, (__u16 __user *) arg))
1939 return -EFAULT;
1940
1941 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1942 return -EINVAL;
1943
1944 size = sizeof(*dl) + dev_num * sizeof(*dr);
1945
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001946 dl = kzalloc(size, GFP_KERNEL);
1947 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 return -ENOMEM;
1949
1950 dr = dl->dev_req;
1951
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001952 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001953 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001954 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001955 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001956
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001957 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1958 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001959
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 (dr + n)->dev_id = hdev->id;
1961 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001962
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 if (++n >= dev_num)
1964 break;
1965 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001966 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968 dl->dev_num = n;
1969 size = sizeof(*dl) + n * sizeof(*dr);
1970
1971 err = copy_to_user(arg, dl, size);
1972 kfree(dl);
1973
1974 return err ? -EFAULT : 0;
1975}
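/* Sketch of the matching userspace enumeration, mirroring what BlueZ's
 * hci_for_each_dev() does; illustrative, with minimal error handling:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, (void *) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dr[i].dev_id, dr[i].dev_opt);
 *	free(dl);
 */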
1976
1977int hci_get_dev_info(void __user *arg)
1978{
1979 struct hci_dev *hdev;
1980 struct hci_dev_info di;
1981 int err = 0;
1982
1983 if (copy_from_user(&di, arg, sizeof(di)))
1984 return -EFAULT;
1985
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001986 hdev = hci_dev_get(di.dev_id);
1987 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 return -ENODEV;
1989
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001990 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001991 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001992
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001993 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1994 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001995
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 strcpy(di.name, hdev->name);
1997 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001998 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 di.flags = hdev->flags;
2000 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002001 if (lmp_bredr_capable(hdev)) {
2002 di.acl_mtu = hdev->acl_mtu;
2003 di.acl_pkts = hdev->acl_pkts;
2004 di.sco_mtu = hdev->sco_mtu;
2005 di.sco_pkts = hdev->sco_pkts;
2006 } else {
2007 di.acl_mtu = hdev->le_mtu;
2008 di.acl_pkts = hdev->le_pkts;
2009 di.sco_mtu = 0;
2010 di.sco_pkts = 0;
2011 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 di.link_policy = hdev->link_policy;
2013 di.link_mode = hdev->link_mode;
2014
2015 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016 memcpy(&di.features, &hdev->features, sizeof(di.features));
2017
2018 if (copy_to_user(arg, &di, sizeof(di)))
2019 err = -EFAULT;
2020
2021 hci_dev_put(hdev);
2022
2023 return err;
2024}
2025
2026/* ---- Interface to HCI drivers ---- */
2027
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002028static int hci_rfkill_set_block(void *data, bool blocked)
2029{
2030 struct hci_dev *hdev = data;
2031
2032 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2033
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002034 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2035 return -EBUSY;
2036
Johan Hedberg5e130362013-09-13 08:58:17 +03002037 if (blocked) {
2038 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002039 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2040 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002041 } else {
2042 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002043 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002044
2045 return 0;
2046}
2047
2048static const struct rfkill_ops hci_rfkill_ops = {
2049 .set_block = hci_rfkill_set_block,
2050};
2051
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002052static void hci_power_on(struct work_struct *work)
2053{
2054 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002055 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002056
2057 BT_DBG("%s", hdev->name);
2058
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002059 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002060 if (err < 0) {
2061 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002062 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002063 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002064
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002065 /* During the HCI setup phase, a few error conditions are
2066 * ignored and they need to be checked now. If they are still
2067 * valid, it is important to turn the device back off.
2068 */
2069 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2070 (hdev->dev_type == HCI_BREDR &&
2071 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2072 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002073 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2074 hci_dev_do_close(hdev);
2075 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002076 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2077 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002078 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002079
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002080 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002081 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002082}
2083
2084static void hci_power_off(struct work_struct *work)
2085{
Johan Hedberg32435532011-11-07 22:16:04 +02002086 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002087 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002088
2089 BT_DBG("%s", hdev->name);
2090
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002091 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002092}
2093
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002094static void hci_discov_off(struct work_struct *work)
2095{
2096 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002097
2098 hdev = container_of(work, struct hci_dev, discov_off.work);
2099
2100 BT_DBG("%s", hdev->name);
2101
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002102 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002103}
2104
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002105int hci_uuids_clear(struct hci_dev *hdev)
2106{
Johan Hedberg48210022013-01-27 00:31:28 +02002107 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002108
Johan Hedberg48210022013-01-27 00:31:28 +02002109 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2110 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002111 kfree(uuid);
2112 }
2113
2114 return 0;
2115}
2116
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002117int hci_link_keys_clear(struct hci_dev *hdev)
2118{
2119 struct list_head *p, *n;
2120
2121 list_for_each_safe(p, n, &hdev->link_keys) {
2122 struct link_key *key;
2123
2124 key = list_entry(p, struct link_key, list);
2125
2126 list_del(p);
2127 kfree(key);
2128 }
2129
2130 return 0;
2131}
2132
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002133int hci_smp_ltks_clear(struct hci_dev *hdev)
2134{
2135 struct smp_ltk *k, *tmp;
2136
2137 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2138 list_del(&k->list);
2139 kfree(k);
2140 }
2141
2142 return 0;
2143}
2144
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002145struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2146{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002147 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002148
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002149 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002150 if (bacmp(bdaddr, &k->bdaddr) == 0)
2151 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002152
2153 return NULL;
2154}
2155
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302156static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002157 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002158{
2159 /* Legacy key */
2160 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302161 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002162
2163 /* Debug keys are insecure so don't store them persistently */
2164 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302165 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002166
2167 /* Changed combination key and there's no previous one */
2168 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302169 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002170
2171 /* Security mode 3 case */
2172 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302173 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002174
2175 /* Neither local nor remote side had no-bonding as requirement */
2176 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302177 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002178
2179 /* Local side had dedicated bonding as requirement */
2180 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302181 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002182
2183 /* Remote side had dedicated bonding as requirement */
2184 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302185 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002186
2187 /* If none of the above criteria match, then don't store the key
2188 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302189 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002190}
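/* Worked examples of the policy above, using the key type values from
 * the HCI Link Key Notification event:
 *
 *	0x00 combination key (legacy)               -> stored
 *	0x03 debug combination key                  -> never stored
 *	0x06 changed combination, old_key_type 0xff -> dropped (no prior bond)
 *	0x04 unauthenticated key, both sides with
 *	     dedicated bonding (0x02/0x03)          -> stored
 */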
2191
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002192struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002193{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002194 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002195
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002196 list_for_each_entry(k, &hdev->long_term_keys, list) {
2197 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002198 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002199 continue;
2200
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002201 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002202 }
2203
2204 return NULL;
2205}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002206
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002207struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002208 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002209{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002210 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002211
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002212 list_for_each_entry(k, &hdev->long_term_keys, list)
2213 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002214 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002215 return k;
2216
2217 return NULL;
2218}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002219
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002220int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002221 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002222{
2223 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302224 u8 old_key_type;
2225 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002226
2227 old_key = hci_find_link_key(hdev, bdaddr);
2228 if (old_key) {
2229 old_key_type = old_key->type;
2230 key = old_key;
2231 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002232 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002233 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2234 if (!key)
2235 return -ENOMEM;
2236 list_add(&key->list, &hdev->link_keys);
2237 }
2238
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002239 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002240
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002241	/* Some buggy controller combinations generate a changed
2242	 * combination key for legacy pairing even when there's no
2243	 * previous key.
	 */
2244 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002245 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002246 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002247 if (conn)
2248 conn->key_type = type;
2249 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002250
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002251 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002252 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002253 key->pin_len = pin_len;
2254
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002255 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002256 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002257 else
2258 key->type = type;
2259
Johan Hedberg4df378a2011-04-28 11:29:03 -07002260 if (!new_key)
2261 return 0;
2262
2263 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2264
Johan Hedberg744cf192011-11-08 20:40:14 +02002265 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002266
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302267 if (conn)
2268 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002269
2270 return 0;
2271}
2272
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002273int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002274 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002275 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002276{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002277 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002278
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002279 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2280 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002281
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002282 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2283 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002284 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002285 else {
2286 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002287 if (!key)
2288 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002289 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002290 }
2291
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002292 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002293 key->bdaddr_type = addr_type;
2294 memcpy(key->val, tk, sizeof(key->val));
2295 key->authenticated = authenticated;
2296 key->ediv = ediv;
2297 key->enc_size = enc_size;
2298 key->type = type;
2299 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002300
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002301 if (!new_key)
2302 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002303
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002304 if (type & HCI_SMP_LTK)
2305 mgmt_new_ltk(hdev, key, 1);
2306
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002307 return 0;
2308}
2309
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002310int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2311{
2312 struct link_key *key;
2313
2314 key = hci_find_link_key(hdev, bdaddr);
2315 if (!key)
2316 return -ENOENT;
2317
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002318 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002319
2320 list_del(&key->list);
2321 kfree(key);
2322
2323 return 0;
2324}
2325
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002326int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2327{
2328 struct smp_ltk *k, *tmp;
2329
2330 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2331 if (bacmp(bdaddr, &k->bdaddr))
2332 continue;
2333
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002334 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002335
2336 list_del(&k->list);
2337 kfree(k);
2338 }
2339
2340 return 0;
2341}
2342
Ville Tervo6bd32322011-02-16 16:32:41 +02002343/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002344static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002345{
2346 struct hci_dev *hdev = (void *) arg;
2347
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002348 if (hdev->sent_cmd) {
2349 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2350 u16 opcode = __le16_to_cpu(sent->opcode);
2351
2352 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2353 } else {
2354 BT_ERR("%s command tx timeout", hdev->name);
2355 }
2356
Ville Tervo6bd32322011-02-16 16:32:41 +02002357 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002358 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002359}
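/* Note: the timer itself is armed in hci_cmd_work(), which (modulo the
 * HCI_RESET special case) does mod_timer(&hdev->cmd_timer,
 * jiffies + HCI_CMD_TIMEOUT) whenever a command is handed to the driver;
 * the event path deletes it again once the matching response arrives.
 */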
2360
Szymon Janc2763eda2011-03-22 13:12:22 +01002361struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002362 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002363{
2364 struct oob_data *data;
2365
2366 list_for_each_entry(data, &hdev->remote_oob_data, list)
2367 if (bacmp(bdaddr, &data->bdaddr) == 0)
2368 return data;
2369
2370 return NULL;
2371}
2372
2373int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2374{
2375 struct oob_data *data;
2376
2377 data = hci_find_remote_oob_data(hdev, bdaddr);
2378 if (!data)
2379 return -ENOENT;
2380
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002381 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002382
2383 list_del(&data->list);
2384 kfree(data);
2385
2386 return 0;
2387}
2388
2389int hci_remote_oob_data_clear(struct hci_dev *hdev)
2390{
2391 struct oob_data *data, *n;
2392
2393 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2394 list_del(&data->list);
2395 kfree(data);
2396 }
2397
2398 return 0;
2399}
2400
2401int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002402 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002403{
2404 struct oob_data *data;
2405
2406 data = hci_find_remote_oob_data(hdev, bdaddr);
2407
2408 if (!data) {
2409 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2410 if (!data)
2411 return -ENOMEM;
2412
2413 bacpy(&data->bdaddr, bdaddr);
2414 list_add(&data->list, &hdev->remote_oob_data);
2415 }
2416
2417 memcpy(data->hash, hash, sizeof(data->hash));
2418 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2419
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002420 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002421
2422 return 0;
2423}
2424
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002425struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2426 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002427{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002428 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002429
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002430 list_for_each_entry(b, &hdev->blacklist, list) {
2431 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002432 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002433 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002434
2435 return NULL;
2436}
2437
2438int hci_blacklist_clear(struct hci_dev *hdev)
2439{
2440 struct list_head *p, *n;
2441
2442 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002443 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002444
2445 list_del(p);
2446 kfree(b);
2447 }
2448
2449 return 0;
2450}
2451
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002452int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002453{
2454 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002455
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002456 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002457 return -EBADF;
2458
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002459 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002460 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002461
2462 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002463 if (!entry)
2464 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002465
2466 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002467 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002468
2469 list_add(&entry->list, &hdev->blacklist);
2470
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002471 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002472}
2473
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002474int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002475{
2476 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002477
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002478 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002479 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002480
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002481 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002482 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002483 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002484
2485 list_del(&entry->list);
2486 kfree(entry);
2487
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002488 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002489}
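/* Usage sketch, condensed from the mgmt Block/Unblock Device handlers
 * that call these helpers (address type constants from bluetooth.h):
 *
 *	hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);	// reject device
 *	hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);	// allow it again
 *	hci_blacklist_del(hdev, BDADDR_ANY, 0);		// clears whole list
 */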
2490
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002491static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002492{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002493 if (status) {
2494 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002495
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002496 hci_dev_lock(hdev);
2497 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2498 hci_dev_unlock(hdev);
2499 return;
2500 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002501}
2502
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002503static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002504{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002505 /* General inquiry access code (GIAC) */
2506 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2507 struct hci_request req;
2508 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002509 int err;
2510
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002511 if (status) {
2512 BT_ERR("Failed to disable LE scanning: status %d", status);
2513 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002514 }
2515
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002516 switch (hdev->discovery.type) {
2517 case DISCOV_TYPE_LE:
2518 hci_dev_lock(hdev);
2519 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2520 hci_dev_unlock(hdev);
2521 break;
2522
2523 case DISCOV_TYPE_INTERLEAVED:
2524 hci_req_init(&req, hdev);
2525
2526 memset(&cp, 0, sizeof(cp));
2527 memcpy(&cp.lap, lap, sizeof(cp.lap));
2528 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2529 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2530
2531 hci_dev_lock(hdev);
2532
2533 hci_inquiry_cache_flush(hdev);
2534
2535 err = hci_req_run(&req, inquiry_complete);
2536 if (err) {
2537 BT_ERR("Inquiry request failed: err %d", err);
2538 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2539 }
2540
2541 hci_dev_unlock(hdev);
2542 break;
2543 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002544}
2545
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002546static void le_scan_disable_work(struct work_struct *work)
2547{
2548 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002549 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002550 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002551 struct hci_request req;
2552 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002553
2554 BT_DBG("%s", hdev->name);
2555
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002556 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002557
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002558 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002559 cp.enable = LE_SCAN_DISABLE;
2560 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002561
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002562 err = hci_req_run(&req, le_scan_disable_work_complete);
2563 if (err)
2564 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002565}
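/* Both handlers above follow the stock hci_request pattern; a minimal
 * sketch of that pattern, with a made-up completion callback name and a
 * command parameter block prepared as in le_scan_disable_work():
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *
 *	err = hci_req_run(&req, my_complete);	// my_complete(hdev, status)
 *	if (err)
 *		BT_ERR("request failed: err %d", err);
 */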
2566
David Herrmann9be0dab2012-04-22 14:39:57 +02002567/* Alloc HCI device */
2568struct hci_dev *hci_alloc_dev(void)
2569{
2570 struct hci_dev *hdev;
2571
2572 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2573 if (!hdev)
2574 return NULL;
2575
David Herrmannb1b813d2012-04-22 14:39:58 +02002576 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2577 hdev->esco_type = (ESCO_HV1);
2578 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002579 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2580 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002581 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2582 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002583
David Herrmannb1b813d2012-04-22 14:39:58 +02002584 hdev->sniff_max_interval = 800;
2585 hdev->sniff_min_interval = 80;
2586
Marcel Holtmannbef64732013-10-11 08:23:19 -07002587 hdev->le_scan_interval = 0x0060;
2588 hdev->le_scan_window = 0x0030;
2589
David Herrmannb1b813d2012-04-22 14:39:58 +02002590 mutex_init(&hdev->lock);
2591 mutex_init(&hdev->req_lock);
2592
2593 INIT_LIST_HEAD(&hdev->mgmt_pending);
2594 INIT_LIST_HEAD(&hdev->blacklist);
2595 INIT_LIST_HEAD(&hdev->uuids);
2596 INIT_LIST_HEAD(&hdev->link_keys);
2597 INIT_LIST_HEAD(&hdev->long_term_keys);
2598 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002599 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002600
2601 INIT_WORK(&hdev->rx_work, hci_rx_work);
2602 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2603 INIT_WORK(&hdev->tx_work, hci_tx_work);
2604 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002605
David Herrmannb1b813d2012-04-22 14:39:58 +02002606 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2607 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2608 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2609
David Herrmannb1b813d2012-04-22 14:39:58 +02002610 skb_queue_head_init(&hdev->rx_q);
2611 skb_queue_head_init(&hdev->cmd_q);
2612 skb_queue_head_init(&hdev->raw_q);
2613
2614 init_waitqueue_head(&hdev->req_wait_q);
2615
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002616 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002617
David Herrmannb1b813d2012-04-22 14:39:58 +02002618 hci_init_sysfs(hdev);
2619 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002620
2621 return hdev;
2622}
2623EXPORT_SYMBOL(hci_alloc_dev);
2624
2625/* Free HCI device */
2626void hci_free_dev(struct hci_dev *hdev)
2627{
David Herrmann9be0dab2012-04-22 14:39:57 +02002628	/* will be freed via the device release callback */
2629 put_device(&hdev->dev);
2630}
2631EXPORT_SYMBOL(hci_free_dev);
2632
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633/* Register HCI device */
2634int hci_register_dev(struct hci_dev *hdev)
2635{
David Herrmannb1b813d2012-04-22 14:39:58 +02002636 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637
David Herrmann010666a2012-01-07 15:47:07 +01002638 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 return -EINVAL;
2640
Mat Martineau08add512011-11-02 16:18:36 -07002641 /* Do not allow HCI_AMP devices to register at index 0,
2642 * so the index can be used as the AMP controller ID.
2643 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002644 switch (hdev->dev_type) {
2645 case HCI_BREDR:
2646 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2647 break;
2648 case HCI_AMP:
2649 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2650 break;
2651 default:
2652 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002654
Sasha Levin3df92b32012-05-27 22:36:56 +02002655 if (id < 0)
2656 return id;
2657
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 sprintf(hdev->name, "hci%d", id);
2659 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002660
2661 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2662
Kees Cookd8537542013-07-03 15:04:57 -07002663 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2664 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002665 if (!hdev->workqueue) {
2666 error = -ENOMEM;
2667 goto err;
2668 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002669
Kees Cookd8537542013-07-03 15:04:57 -07002670 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2671 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002672 if (!hdev->req_workqueue) {
2673 destroy_workqueue(hdev->workqueue);
2674 error = -ENOMEM;
2675 goto err;
2676 }
2677
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002678 if (!IS_ERR_OR_NULL(bt_debugfs))
2679 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2680
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002681 dev_set_name(&hdev->dev, "%s", hdev->name);
2682
2683 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002684 if (error < 0)
2685 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002687 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002688 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2689 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002690 if (hdev->rfkill) {
2691 if (rfkill_register(hdev->rfkill) < 0) {
2692 rfkill_destroy(hdev->rfkill);
2693 hdev->rfkill = NULL;
2694 }
2695 }
2696
Johan Hedberg5e130362013-09-13 08:58:17 +03002697 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2698 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2699
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002700 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002701 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002702
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002703 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002704 /* Assume BR/EDR support until proven otherwise (such as
2705 * through reading supported features during init.
2706 */
2707 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2708 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002709
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002710 write_lock(&hci_dev_list_lock);
2711 list_add(&hdev->list, &hci_dev_list);
2712 write_unlock(&hci_dev_list_lock);
2713
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002715 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716
Johan Hedberg19202572013-01-14 22:33:51 +02002717 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002718
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002720
David Herrmann33ca9542011-10-08 14:58:49 +02002721err_wqueue:
2722 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002723 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002724err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002725 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002726
David Herrmann33ca9542011-10-08 14:58:49 +02002727 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728}
2729EXPORT_SYMBOL(hci_register_dev);
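/* Minimal transport-driver sketch of the alloc/register contract above,
 * modelled loosely on drivers such as btusb; the my_* callbacks and priv
 * pointer are made-up names for illustration:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;		// required, called from hci_dev_do_open()
 *	hdev->close = my_close;		// required
 *	hdev->send  = my_send_frame;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */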
2730
2731/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002732void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733{
Sasha Levin3df92b32012-05-27 22:36:56 +02002734 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002735
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002736 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737
Johan Hovold94324962012-03-15 14:48:41 +01002738 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2739
Sasha Levin3df92b32012-05-27 22:36:56 +02002740 id = hdev->id;
2741
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002742 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002744 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745
2746 hci_dev_do_close(hdev);
2747
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302748 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002749 kfree_skb(hdev->reassembly[i]);
2750
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002751 cancel_work_sync(&hdev->power_on);
2752
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002753 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002754 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002755 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002756 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002757 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002758 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002759
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002760 /* mgmt_index_removed should take care of emptying the
2761 * pending list */
2762 BUG_ON(!list_empty(&hdev->mgmt_pending));
2763
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 hci_notify(hdev, HCI_DEV_UNREG);
2765
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002766 if (hdev->rfkill) {
2767 rfkill_unregister(hdev->rfkill);
2768 rfkill_destroy(hdev->rfkill);
2769 }
2770
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002771 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002772
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002773 debugfs_remove_recursive(hdev->debugfs);
2774
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002775 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002776 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002777
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002778 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002779 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002780 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002781 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002782 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002783 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002784 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002785
David Herrmanndc946bd2012-01-07 15:47:24 +01002786 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002787
2788 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789}
2790EXPORT_SYMBOL(hci_unregister_dev);
2791
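/* Illustrative sketch (not part of this file): the usual driver-side
 * lifecycle around hci_register_dev()/hci_unregister_dev(). The foo_*
 * names are assumptions for the example; hci_alloc_dev(),
 * hci_free_dev() and SET_HCIDEV_DEV() are provided by the core.
 */
static int foo_open(struct hci_dev *hdev) { return 0; }
static int foo_close(struct hci_dev *hdev) { return 0; }
static int foo_flush(struct hci_dev *hdev) { return 0; }

static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int foo_probe(struct device *parent)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = foo_open;
	hdev->close = foo_close;
	hdev->flush = foo_flush;
	hdev->send = foo_send;
	SET_HCIDEV_DEV(hdev, parent);

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
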
2792/* Suspend HCI device */
2793int hci_suspend_dev(struct hci_dev *hdev)
2794{
2795 hci_notify(hdev, HCI_DEV_SUSPEND);
2796 return 0;
2797}
2798EXPORT_SYMBOL(hci_suspend_dev);
2799
2800/* Resume HCI device */
2801int hci_resume_dev(struct hci_dev *hdev)
2802{
2803 hci_notify(hdev, HCI_DEV_RESUME);
2804 return 0;
2805}
2806EXPORT_SYMBOL(hci_resume_dev);
2807
Marcel Holtmann76bca882009-11-18 00:40:39 +01002808/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002809int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002810{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002811 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002812 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002813 kfree_skb(skb);
2814 return -ENXIO;
2815 }
2816
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002817 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002818 bt_cb(skb)->incoming = 1;
2819
2820 /* Time stamp */
2821 __net_timestamp(skb);
2822
Marcel Holtmann76bca882009-11-18 00:40:39 +01002823 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002824 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002825
Marcel Holtmann76bca882009-11-18 00:40:39 +01002826 return 0;
2827}
2828EXPORT_SYMBOL(hci_recv_frame);
2829
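/* Illustrative sketch: how a transport driver might deliver one
 * complete event packet to the core. The foo_* name is an assumption;
 * hci_recv_frame() takes ownership of the skb in all cases.
 */
static void foo_driver_rx_complete(struct hci_dev *hdev, void *buf,
				   int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	hci_recv_frame(hdev, skb);
}
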
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302830static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002831 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302832{
2833 int len = 0;
2834 int hlen = 0;
2835 int remain = count;
2836 struct sk_buff *skb;
2837 struct bt_skb_cb *scb;
2838
2839 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002840 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302841 return -EILSEQ;
2842
2843 skb = hdev->reassembly[index];
2844
2845 if (!skb) {
2846 switch (type) {
2847 case HCI_ACLDATA_PKT:
2848 len = HCI_MAX_FRAME_SIZE;
2849 hlen = HCI_ACL_HDR_SIZE;
2850 break;
2851 case HCI_EVENT_PKT:
2852 len = HCI_MAX_EVENT_SIZE;
2853 hlen = HCI_EVENT_HDR_SIZE;
2854 break;
2855 case HCI_SCODATA_PKT:
2856 len = HCI_MAX_SCO_SIZE;
2857 hlen = HCI_SCO_HDR_SIZE;
2858 break;
2859 }
2860
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002861 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302862 if (!skb)
2863 return -ENOMEM;
2864
2865 scb = (void *) skb->cb;
2866 scb->expect = hlen;
2867 scb->pkt_type = type;
2868
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302869 hdev->reassembly[index] = skb;
2870 }
2871
2872 while (count) {
2873 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002874 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302875
2876 memcpy(skb_put(skb, len), data, len);
2877
2878 count -= len;
2879 data += len;
2880 scb->expect -= len;
2881 remain = count;
2882
2883 switch (type) {
2884 case HCI_EVENT_PKT:
2885 if (skb->len == HCI_EVENT_HDR_SIZE) {
2886 struct hci_event_hdr *h = hci_event_hdr(skb);
2887 scb->expect = h->plen;
2888
2889 if (skb_tailroom(skb) < scb->expect) {
2890 kfree_skb(skb);
2891 hdev->reassembly[index] = NULL;
2892 return -ENOMEM;
2893 }
2894 }
2895 break;
2896
2897 case HCI_ACLDATA_PKT:
2898 if (skb->len == HCI_ACL_HDR_SIZE) {
2899 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2900 scb->expect = __le16_to_cpu(h->dlen);
2901
2902 if (skb_tailroom(skb) < scb->expect) {
2903 kfree_skb(skb);
2904 hdev->reassembly[index] = NULL;
2905 return -ENOMEM;
2906 }
2907 }
2908 break;
2909
2910 case HCI_SCODATA_PKT:
2911 if (skb->len == HCI_SCO_HDR_SIZE) {
2912 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2913 scb->expect = h->dlen;
2914
2915 if (skb_tailroom(skb) < scb->expect) {
2916 kfree_skb(skb);
2917 hdev->reassembly[index] = NULL;
2918 return -ENOMEM;
2919 }
2920 }
2921 break;
2922 }
2923
2924 if (scb->expect == 0) {
2925 /* Complete frame */
2926
2927 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002928 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302929
2930 hdev->reassembly[index] = NULL;
2931 return remain;
2932 }
2933 }
2934
2935 return remain;
2936}
2937
Marcel Holtmannef222012007-07-11 06:42:04 +02002938int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2939{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302940 int rem = 0;
2941
Marcel Holtmannef222012007-07-11 06:42:04 +02002942 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2943 return -EILSEQ;
2944
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002945 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002946 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302947 if (rem < 0)
2948 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002949
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302950 data += (count - rem);
2951 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002952 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002953
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302954 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002955}
2956EXPORT_SYMBOL(hci_recv_fragment);
2957
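/* Illustrative sketch: feeding one ACL packet to hci_recv_fragment()
 * in two pieces. The foo_* name and the split point are assumptions;
 * the reassembler keeps the partial packet between calls, so only the
 * packet type must be known up front. A negative return is an error.
 */
static void foo_driver_rx_split(struct hci_dev *hdev, u8 *hdr,
				int hdr_len, u8 *payload, int payload_len)
{
	if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, hdr, hdr_len) < 0)
		return;

	/* Once the length from the ACL header is satisfied, the
	 * complete frame is pushed to the RX queue automatically.
	 */
	hci_recv_fragment(hdev, HCI_ACLDATA_PKT, payload, payload_len);
}
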
Suraj Sumangala99811512010-07-14 13:02:19 +05302958#define STREAM_REASSEMBLY 0
2959
2960int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2961{
2962 int type;
2963 int rem = 0;
2964
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002965 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302966 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2967
2968 if (!skb) {
2969 struct { char type; } *pkt;
2970
2971 /* Start of the frame */
2972 pkt = data;
2973 type = pkt->type;
2974
2975 data++;
2976 count--;
2977 } else
2978 type = bt_cb(skb)->pkt_type;
2979
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002980 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002981 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302982 if (rem < 0)
2983 return rem;
2984
2985 data += (count - rem);
2986 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002987 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302988
2989 return rem;
2990}
2991EXPORT_SYMBOL(hci_recv_stream_fragment);
2992
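/* Illustrative sketch: a byte-stream transport (UART-like) can pass
 * raw buffers straight through; the leading packet-type byte of each
 * frame is consumed by hci_recv_stream_fragment() itself. The foo_*
 * name is an assumption.
 */
static void foo_uart_rx(struct hci_dev *hdev, u8 *buf, int count)
{
	if (hci_recv_stream_fragment(hdev, buf, count) < 0)
		BT_ERR("%s corrupted HCI stream", hdev->name);
}
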
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993/* ---- Interface to upper protocols ---- */
2994
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995int hci_register_cb(struct hci_cb *cb)
2996{
2997 BT_DBG("%p name %s", cb, cb->name);
2998
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002999 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003001 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002
3003 return 0;
3004}
3005EXPORT_SYMBOL(hci_register_cb);
3006
3007int hci_unregister_cb(struct hci_cb *cb)
3008{
3009 BT_DBG("%p name %s", cb, cb->name);
3010
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003011 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003013 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014
3015 return 0;
3016}
3017EXPORT_SYMBOL(hci_unregister_cb);
3018
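/* Illustrative sketch: an upper protocol hooks into connection events
 * with a statically initialized hci_cb. The security_cfm member is
 * assumed from struct hci_cb in hci_core.h; foo_* names are
 * placeholders.
 */
static void foo_security_cfm(struct hci_conn *conn, __u8 status,
			     __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb foo_cb = {
	.name		= "foo",
	.security_cfm	= foo_security_cfm,
};

/* hci_register_cb(&foo_cb) at module init and
 * hci_unregister_cb(&foo_cb) at module exit complete the pairing.
 */
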
Marcel Holtmann51086992013-10-10 14:54:19 -07003019static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003021 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003023 /* Time stamp */
3024 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003026 /* Send copy to monitor */
3027 hci_send_to_monitor(hdev, skb);
3028
3029 if (atomic_read(&hdev->promisc)) {
3030 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003031 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032 }
3033
3034	/* Get rid of skb owner prior to sending to the driver. */
3035 skb_orphan(skb);
3036
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003037 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003038 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039}
3040
Johan Hedberg3119ae92013-03-05 20:37:44 +02003041void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3042{
3043 skb_queue_head_init(&req->cmd_q);
3044 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003045 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003046}
3047
3048int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3049{
3050 struct hci_dev *hdev = req->hdev;
3051 struct sk_buff *skb;
3052 unsigned long flags;
3053
3054 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3055
Andre Guedes5d73e032013-03-08 11:20:16 -03003056	/* If an error occurred during request building, remove all HCI
3057 * commands queued on the HCI request queue.
3058 */
3059 if (req->err) {
3060 skb_queue_purge(&req->cmd_q);
3061 return req->err;
3062 }
3063
Johan Hedberg3119ae92013-03-05 20:37:44 +02003064 /* Do not allow empty requests */
3065 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003066 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003067
3068 skb = skb_peek_tail(&req->cmd_q);
3069 bt_cb(skb)->req.complete = complete;
3070
3071 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3072 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3073 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3074
3075 queue_work(hdev->workqueue, &hdev->cmd_work);
3076
3077 return 0;
3078}
3079
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003080static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003081 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082{
3083 int len = HCI_COMMAND_HDR_SIZE + plen;
3084 struct hci_command_hdr *hdr;
3085 struct sk_buff *skb;
3086
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003088 if (!skb)
3089 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090
3091 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003092 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 hdr->plen = plen;
3094
3095 if (plen)
3096 memcpy(skb_put(skb, plen), param, plen);
3097
3098 BT_DBG("skb len %d", skb->len);
3099
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003100 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003101
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003102 return skb;
3103}
3104
3105/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003106int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3107 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003108{
3109 struct sk_buff *skb;
3110
3111 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3112
3113 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3114 if (!skb) {
3115 BT_ERR("%s no memory for command", hdev->name);
3116 return -ENOMEM;
3117 }
3118
Johan Hedberg11714b32013-03-05 20:37:47 +02003119	/* Stand-alone HCI commands must be flagged as
3120 * single-command requests.
3121 */
3122 bt_cb(skb)->req.start = true;
3123
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003125 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126
3127 return 0;
3128}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129
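/* Illustrative sketch: queueing a single command. The opcode and the
 * SCAN_PAGE constant come from hci.h; the completion is delivered
 * asynchronously through the event path. foo_* is a placeholder.
 */
static int foo_enable_page_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
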
Johan Hedberg71c76a12013-03-05 20:37:46 +02003130/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003131void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3132 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003133{
3134 struct hci_dev *hdev = req->hdev;
3135 struct sk_buff *skb;
3136
3137 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3138
Andre Guedes34739c12013-03-08 11:20:18 -03003139	/* If an error occurred during request building, there is no point in
3140 * queueing the HCI command. We can simply return.
3141 */
3142 if (req->err)
3143 return;
3144
Johan Hedberg71c76a12013-03-05 20:37:46 +02003145 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3146 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003147 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3148 hdev->name, opcode);
3149 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003150 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003151 }
3152
3153 if (skb_queue_empty(&req->cmd_q))
3154 bt_cb(skb)->req.start = true;
3155
Johan Hedberg02350a72013-04-03 21:50:29 +03003156 bt_cb(skb)->req.event = event;
3157
Johan Hedberg71c76a12013-03-05 20:37:46 +02003158 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003159}
3160
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003161void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3162 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003163{
3164 hci_req_add_ev(req, opcode, plen, param, 0);
3165}
3166
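/* Illustrative sketch: building a two-command request and submitting
 * it with one completion callback for the whole transaction. The
 * foo_* names are assumptions; the opcodes come from hci.h.
 */
static void foo_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int foo_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* All queued commands are spliced to hdev->cmd_q atomically;
	 * foo_req_complete() runs once, after the last one finishes.
	 */
	return hci_req_run(&req, foo_req_complete);
}
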
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003168void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169{
3170 struct hci_command_hdr *hdr;
3171
3172 if (!hdev->sent_cmd)
3173 return NULL;
3174
3175 hdr = (void *) hdev->sent_cmd->data;
3176
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003177 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178 return NULL;
3179
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003180 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181
3182 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3183}
3184
3185/* Send ACL data */
3186static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3187{
3188 struct hci_acl_hdr *hdr;
3189 int len = skb->len;
3190
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003191 skb_push(skb, HCI_ACL_HDR_SIZE);
3192 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003193 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003194 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3195 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196}
3197
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003198static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003199 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003201 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202 struct hci_dev *hdev = conn->hdev;
3203 struct sk_buff *list;
3204
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003205 skb->len = skb_headlen(skb);
3206 skb->data_len = 0;
3207
3208 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003209
3210 switch (hdev->dev_type) {
3211 case HCI_BREDR:
3212 hci_add_acl_hdr(skb, conn->handle, flags);
3213 break;
3214 case HCI_AMP:
3215 hci_add_acl_hdr(skb, chan->handle, flags);
3216 break;
3217 default:
3218 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3219 return;
3220 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003221
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003222 list = skb_shinfo(skb)->frag_list;
3223 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224		/* Non-fragmented */
3225 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3226
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003227 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 } else {
3229 /* Fragmented */
3230 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3231
3232 skb_shinfo(skb)->frag_list = NULL;
3233
3234 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003235 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003237 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003238
3239 flags &= ~ACL_START;
3240 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 do {
3242 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003243
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003244 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003245 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246
3247 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3248
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003249 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 } while (list);
3251
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003252 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003254}
3255
3256void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3257{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003258 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003259
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003260 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003261
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003262 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003264 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266
3267/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003268void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269{
3270 struct hci_dev *hdev = conn->hdev;
3271 struct hci_sco_hdr hdr;
3272
3273 BT_DBG("%s len %d", hdev->name, skb->len);
3274
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003275 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276 hdr.dlen = skb->len;
3277
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003278 skb_push(skb, HCI_SCO_HDR_SIZE);
3279 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003280 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003282 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003283
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003285 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287
3288/* ---- HCI TX task (outgoing data) ---- */
3289
3290/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003291static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3292 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293{
3294 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003295 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003296 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003298 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003300
3301 rcu_read_lock();
3302
3303 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003304 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003306
3307 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3308 continue;
3309
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 num++;
3311
3312 if (c->sent < min) {
3313 min = c->sent;
3314 conn = c;
3315 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003316
3317 if (hci_conn_num(hdev, type) == num)
3318 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319 }
3320
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003321 rcu_read_unlock();
3322
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003324 int cnt, q;
3325
3326 switch (conn->type) {
3327 case ACL_LINK:
3328 cnt = hdev->acl_cnt;
3329 break;
3330 case SCO_LINK:
3331 case ESCO_LINK:
3332 cnt = hdev->sco_cnt;
3333 break;
3334 case LE_LINK:
3335 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3336 break;
3337 default:
3338 cnt = 0;
3339 BT_ERR("Unknown link type");
3340 }
3341
3342 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 *quote = q ? q : 1;
3344 } else
3345 *quote = 0;
3346
3347 BT_DBG("conn %p quote %d", conn, *quote);
3348 return conn;
3349}
3350
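/* Worked example for the fair scheduler above: with three ACL links
 * holding 2, 5 and 2 packets in flight and hdev->acl_cnt == 8, the
 * least-used link is chosen (ties keep the first one found) and its
 * quote is 8 / 3 == 2 packets for this round.
 */
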
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003351static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352{
3353 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003354 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355
Ville Tervobae1f5d92011-02-10 22:38:53 -03003356 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003358 rcu_read_lock();
3359
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003361 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003362 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003363 BT_ERR("%s killing stalled connection %pMR",
3364 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003365 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 }
3367 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003368
3369 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370}
3371
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003372static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3373 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003374{
3375 struct hci_conn_hash *h = &hdev->conn_hash;
3376 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003377 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003378 struct hci_conn *conn;
3379 int cnt, q, conn_num = 0;
3380
3381 BT_DBG("%s", hdev->name);
3382
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003383 rcu_read_lock();
3384
3385 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003386 struct hci_chan *tmp;
3387
3388 if (conn->type != type)
3389 continue;
3390
3391 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3392 continue;
3393
3394 conn_num++;
3395
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003396 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003397 struct sk_buff *skb;
3398
3399 if (skb_queue_empty(&tmp->data_q))
3400 continue;
3401
3402 skb = skb_peek(&tmp->data_q);
3403 if (skb->priority < cur_prio)
3404 continue;
3405
3406 if (skb->priority > cur_prio) {
3407 num = 0;
3408 min = ~0;
3409 cur_prio = skb->priority;
3410 }
3411
3412 num++;
3413
3414 if (conn->sent < min) {
3415 min = conn->sent;
3416 chan = tmp;
3417 }
3418 }
3419
3420 if (hci_conn_num(hdev, type) == conn_num)
3421 break;
3422 }
3423
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003424 rcu_read_unlock();
3425
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003426 if (!chan)
3427 return NULL;
3428
3429 switch (chan->conn->type) {
3430 case ACL_LINK:
3431 cnt = hdev->acl_cnt;
3432 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003433 case AMP_LINK:
3434 cnt = hdev->block_cnt;
3435 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003436 case SCO_LINK:
3437 case ESCO_LINK:
3438 cnt = hdev->sco_cnt;
3439 break;
3440 case LE_LINK:
3441 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3442 break;
3443 default:
3444 cnt = 0;
3445 BT_ERR("Unknown link type");
3446 }
3447
3448 q = cnt / num;
3449 *quote = q ? q : 1;
3450 BT_DBG("chan %p quote %d", chan, *quote);
3451 return chan;
3452}
3453
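/* Note on the channel scheduler above: only channels whose head skb
 * carries the highest pending priority compete in a given round, and
 * among them the connection with the fewest in-flight packets wins.
 * Lower-priority traffic waits and is later promoted by
 * hci_prio_recalculate() so it cannot starve indefinitely.
 */
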
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003454static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3455{
3456 struct hci_conn_hash *h = &hdev->conn_hash;
3457 struct hci_conn *conn;
3458 int num = 0;
3459
3460 BT_DBG("%s", hdev->name);
3461
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003462 rcu_read_lock();
3463
3464 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003465 struct hci_chan *chan;
3466
3467 if (conn->type != type)
3468 continue;
3469
3470 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3471 continue;
3472
3473 num++;
3474
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003475 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003476 struct sk_buff *skb;
3477
3478 if (chan->sent) {
3479 chan->sent = 0;
3480 continue;
3481 }
3482
3483 if (skb_queue_empty(&chan->data_q))
3484 continue;
3485
3486 skb = skb_peek(&chan->data_q);
3487 if (skb->priority >= HCI_PRIO_MAX - 1)
3488 continue;
3489
3490 skb->priority = HCI_PRIO_MAX - 1;
3491
3492 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003493 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003494 }
3495
3496 if (hci_conn_num(hdev, type) == num)
3497 break;
3498 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003499
3500 rcu_read_unlock();
3501
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003502}
3503
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003504static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3505{
3506 /* Calculate count of blocks used by this packet */
3507 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3508}
3509
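/* Worked example for __get_blocks() above: with hdev->block_len == 64
 * and a 4-byte ACL header plus 252 bytes of payload (skb->len == 256),
 * DIV_ROUND_UP(252, 64) == 4 controller blocks are consumed.
 */
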
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003510static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 if (!test_bit(HCI_RAW, &hdev->flags)) {
3513		/* ACL tx timeout must be longer than the maximum
3514 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003515 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003516 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003517 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003519}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003521static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003522{
3523 unsigned int cnt = hdev->acl_cnt;
3524 struct hci_chan *chan;
3525 struct sk_buff *skb;
3526 int quote;
3527
3528 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003529
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003530 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003531 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003532 u32 priority = (skb_peek(&chan->data_q))->priority;
3533 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003534 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003535 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003536
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003537 /* Stop if priority has changed */
3538 if (skb->priority < priority)
3539 break;
3540
3541 skb = skb_dequeue(&chan->data_q);
3542
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003543 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003544 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003545
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003546 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547 hdev->acl_last_tx = jiffies;
3548
3549 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003550 chan->sent++;
3551 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552 }
3553 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003554
3555 if (cnt != hdev->acl_cnt)
3556 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557}
3558
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003559static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003560{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003561 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003562 struct hci_chan *chan;
3563 struct sk_buff *skb;
3564 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003565 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003566
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003567 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003568
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003569 BT_DBG("%s", hdev->name);
3570
3571 if (hdev->dev_type == HCI_AMP)
3572 type = AMP_LINK;
3573 else
3574 type = ACL_LINK;
3575
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003576 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003577 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003578 u32 priority = (skb_peek(&chan->data_q))->priority;
3579 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3580 int blocks;
3581
3582 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003583 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003584
3585 /* Stop if priority has changed */
3586 if (skb->priority < priority)
3587 break;
3588
3589 skb = skb_dequeue(&chan->data_q);
3590
3591 blocks = __get_blocks(hdev, skb);
3592 if (blocks > hdev->block_cnt)
3593 return;
3594
3595 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003596 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003597
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003598 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003599 hdev->acl_last_tx = jiffies;
3600
3601 hdev->block_cnt -= blocks;
3602 quote -= blocks;
3603
3604 chan->sent += blocks;
3605 chan->conn->sent += blocks;
3606 }
3607 }
3608
3609 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003610 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003611}
3612
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003613static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003614{
3615 BT_DBG("%s", hdev->name);
3616
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003617 /* No ACL link over BR/EDR controller */
3618 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3619 return;
3620
3621 /* No AMP link over AMP controller */
3622 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003623 return;
3624
3625 switch (hdev->flow_ctl_mode) {
3626 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3627 hci_sched_acl_pkt(hdev);
3628 break;
3629
3630 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3631 hci_sched_acl_blk(hdev);
3632 break;
3633 }
3634}
3635
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003637static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638{
3639 struct hci_conn *conn;
3640 struct sk_buff *skb;
3641 int quote;
3642
3643 BT_DBG("%s", hdev->name);
3644
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003645 if (!hci_conn_num(hdev, SCO_LINK))
3646 return;
3647
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3649 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3650 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003651 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652
3653 conn->sent++;
3654 if (conn->sent == ~0)
3655 conn->sent = 0;
3656 }
3657 }
3658}
3659
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003660static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003661{
3662 struct hci_conn *conn;
3663 struct sk_buff *skb;
3664 int quote;
3665
3666 BT_DBG("%s", hdev->name);
3667
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003668 if (!hci_conn_num(hdev, ESCO_LINK))
3669 return;
3670
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003671 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3672 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003673 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3674 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003675 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003676
3677 conn->sent++;
3678 if (conn->sent == ~0)
3679 conn->sent = 0;
3680 }
3681 }
3682}
3683
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003684static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003685{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003686 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003687 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003688 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003689
3690 BT_DBG("%s", hdev->name);
3691
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003692 if (!hci_conn_num(hdev, LE_LINK))
3693 return;
3694
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003695 if (!test_bit(HCI_RAW, &hdev->flags)) {
3696		/* LE tx timeout must be longer than the maximum
3697 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003698 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003699 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003700 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003701 }
3702
3703 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003704 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003705 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003706 u32 priority = (skb_peek(&chan->data_q))->priority;
3707 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003708 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003709 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003710
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003711 /* Stop if priority has changed */
3712 if (skb->priority < priority)
3713 break;
3714
3715 skb = skb_dequeue(&chan->data_q);
3716
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003717 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003718 hdev->le_last_tx = jiffies;
3719
3720 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003721 chan->sent++;
3722 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003723 }
3724 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003725
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003726 if (hdev->le_pkts)
3727 hdev->le_cnt = cnt;
3728 else
3729 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003730
3731 if (cnt != tmp)
3732 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003733}
3734
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003735static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003737 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738 struct sk_buff *skb;
3739
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003740 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003741 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742
Marcel Holtmann52de5992013-09-03 18:08:38 -07003743 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3744 /* Schedule queues and send stuff to HCI driver */
3745 hci_sched_acl(hdev);
3746 hci_sched_sco(hdev);
3747 hci_sched_esco(hdev);
3748 hci_sched_le(hdev);
3749 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003750
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 /* Send next queued raw (unknown type) packet */
3752 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003753 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003754}
3755
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003756/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757
3758/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003759static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760{
3761 struct hci_acl_hdr *hdr = (void *) skb->data;
3762 struct hci_conn *conn;
3763 __u16 handle, flags;
3764
3765 skb_pull(skb, HCI_ACL_HDR_SIZE);
3766
3767 handle = __le16_to_cpu(hdr->handle);
3768 flags = hci_flags(handle);
3769 handle = hci_handle(handle);
3770
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003771 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003772 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773
3774 hdev->stat.acl_rx++;
3775
3776 hci_dev_lock(hdev);
3777 conn = hci_conn_hash_lookup_handle(hdev, handle);
3778 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003779
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003781 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003782
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003784 l2cap_recv_acldata(conn, skb, flags);
3785 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003787 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003788 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 }
3790
3791 kfree_skb(skb);
3792}
3793
3794/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003795static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796{
3797 struct hci_sco_hdr *hdr = (void *) skb->data;
3798 struct hci_conn *conn;
3799 __u16 handle;
3800
3801 skb_pull(skb, HCI_SCO_HDR_SIZE);
3802
3803 handle = __le16_to_cpu(hdr->handle);
3804
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003805 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806
3807 hdev->stat.sco_rx++;
3808
3809 hci_dev_lock(hdev);
3810 conn = hci_conn_hash_lookup_handle(hdev, handle);
3811 hci_dev_unlock(hdev);
3812
3813 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003815 sco_recv_scodata(conn, skb);
3816 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003818 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003819 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 }
3821
3822 kfree_skb(skb);
3823}
3824
Johan Hedberg9238f362013-03-05 20:37:48 +02003825static bool hci_req_is_complete(struct hci_dev *hdev)
3826{
3827 struct sk_buff *skb;
3828
3829 skb = skb_peek(&hdev->cmd_q);
3830 if (!skb)
3831 return true;
3832
3833 return bt_cb(skb)->req.start;
3834}
3835
Johan Hedberg42c6b122013-03-05 20:37:49 +02003836static void hci_resend_last(struct hci_dev *hdev)
3837{
3838 struct hci_command_hdr *sent;
3839 struct sk_buff *skb;
3840 u16 opcode;
3841
3842 if (!hdev->sent_cmd)
3843 return;
3844
3845 sent = (void *) hdev->sent_cmd->data;
3846 opcode = __le16_to_cpu(sent->opcode);
3847 if (opcode == HCI_OP_RESET)
3848 return;
3849
3850 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3851 if (!skb)
3852 return;
3853
3854 skb_queue_head(&hdev->cmd_q, skb);
3855 queue_work(hdev->workqueue, &hdev->cmd_work);
3856}
3857
Johan Hedberg9238f362013-03-05 20:37:48 +02003858void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3859{
3860 hci_req_complete_t req_complete = NULL;
3861 struct sk_buff *skb;
3862 unsigned long flags;
3863
3864 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3865
Johan Hedberg42c6b122013-03-05 20:37:49 +02003866 /* If the completed command doesn't match the last one that was
3867	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003868 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003869 if (!hci_sent_cmd_data(hdev, opcode)) {
3870 /* Some CSR based controllers generate a spontaneous
3871 * reset complete event during init and any pending
3872 * command will never be completed. In such a case we
3873 * need to resend whatever was the last sent
3874 * command.
3875 */
3876 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3877 hci_resend_last(hdev);
3878
Johan Hedberg9238f362013-03-05 20:37:48 +02003879 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003880 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003881
3882 /* If the command succeeded and there's still more commands in
3883 * this request the request is not yet complete.
3884 */
3885 if (!status && !hci_req_is_complete(hdev))
3886 return;
3887
3888 /* If this was the last command in a request the complete
3889 * callback would be found in hdev->sent_cmd instead of the
3890 * command queue (hdev->cmd_q).
3891 */
3892 if (hdev->sent_cmd) {
3893 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003894
3895 if (req_complete) {
3896 /* We must set the complete callback to NULL to
3897 * avoid calling the callback more than once if
3898 * this function gets called again.
3899 */
3900 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3901
Johan Hedberg9238f362013-03-05 20:37:48 +02003902 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003903 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003904 }
3905
3906 /* Remove all pending commands belonging to this request */
3907 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3908 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3909 if (bt_cb(skb)->req.start) {
3910 __skb_queue_head(&hdev->cmd_q, skb);
3911 break;
3912 }
3913
3914 req_complete = bt_cb(skb)->req.complete;
3915 kfree_skb(skb);
3916 }
3917 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3918
3919call_complete:
3920 if (req_complete)
3921 req_complete(hdev, status);
3922}
3923
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003924static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003926 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 struct sk_buff *skb;
3928
3929 BT_DBG("%s", hdev->name);
3930
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003932 /* Send copy to monitor */
3933 hci_send_to_monitor(hdev, skb);
3934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 if (atomic_read(&hdev->promisc)) {
3936 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003937 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 }
3939
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003940 if (test_bit(HCI_RAW, &hdev->flags) ||
3941 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 kfree_skb(skb);
3943 continue;
3944 }
3945
3946 if (test_bit(HCI_INIT, &hdev->flags)) {
3947			/* Don't process data packets in these states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003948 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 case HCI_ACLDATA_PKT:
3950 case HCI_SCODATA_PKT:
3951 kfree_skb(skb);
3952 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003953 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 }
3955
3956 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003957 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003959 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960 hci_event_packet(hdev, skb);
3961 break;
3962
3963 case HCI_ACLDATA_PKT:
3964 BT_DBG("%s ACL data packet", hdev->name);
3965 hci_acldata_packet(hdev, skb);
3966 break;
3967
3968 case HCI_SCODATA_PKT:
3969 BT_DBG("%s SCO data packet", hdev->name);
3970 hci_scodata_packet(hdev, skb);
3971 break;
3972
3973 default:
3974 kfree_skb(skb);
3975 break;
3976 }
3977 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978}
3979
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003980static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003982 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003983 struct sk_buff *skb;
3984
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003985 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3986 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003989 if (atomic_read(&hdev->cmd_cnt)) {
3990 skb = skb_dequeue(&hdev->cmd_q);
3991 if (!skb)
3992 return;
3993
Wei Yongjun7585b972009-02-25 18:29:52 +08003994 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003996 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003997 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003999 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004000 if (test_bit(HCI_RESET, &hdev->flags))
4001 del_timer(&hdev->cmd_timer);
4002 else
4003 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004004 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005 } else {
4006 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004007 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 }
4009 }
4010}