/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

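/* The read-only entries below share the seq_file single_open() pattern:
 * a _show() callback prints the state under hci_dev_lock(), an _open()
 * wrapper binds it to the hci_dev stashed in inode->i_private, and a
 * const file_operations wires up open/read/llseek/release.
 */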
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid);
		data4 = get_unaligned_le16(uuid + 4);
		data3 = get_unaligned_le16(uuid + 6);
		data2 = get_unaligned_le16(uuid + 8);
		data1 = get_unaligned_le16(uuid + 10);
		data0 = get_unaligned_le32(uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
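
/* The writable numeric entries below follow a common pattern:
 * DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations from a u64
 * getter, an optional setter and a printf format string, so each
 * attribute only implements the accessors under hci_dev_lock().
 */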

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

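/* Synchronous requests use a simple handshake: the submitter sets
 * hdev->req_status to HCI_REQ_PEND and sleeps on hdev->req_wait_q;
 * the completion (or cancellation) path records the result, moves
 * req_status to HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the
 * waiter up.
 */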
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
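
/* Illustrative only: a typical synchronous command issue, assuming a
 * valid, powered-up hdev (callers typically hold the request lock) and
 * the stack's standard HCI_CMD_TIMEOUT:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse the command complete parameters at skb->data ...
 *	kfree_skb(skb);
 */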

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
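
/* Illustrative only: callers pass a request-builder callback such as
 * hci_reset_req() below, e.g.
 *
 *	err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 *
 * which queues the commands, runs them and sleeps until the last one
 * completes, is cancelled or times out.
 */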

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

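/* Choose the value for the Write Inquiry Mode command. In the
 * Bluetooth specification 0x02 means "inquiry result with RSSI or
 * extended inquiry result", 0x01 means "inquiry result with RSSI" and
 * 0x00 the standard inquiry result format. The manufacturer/revision
 * checks below appear to cover controllers that handle RSSI results
 * without advertising the corresponding feature bit.
 */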
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

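/* Controller bring-up runs as up to four synchronous request stages:
 * stage 1 resets the controller and reads basic controller info,
 * stages 2-4 program event masks, link policy and optional features.
 * AMP controllers stop after stage 1. The debugfs entries are created
 * only once, while HCI_SETUP is still set.
 */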
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The HCI_BREDR device type covers single-mode BR/EDR, single-mode
	 * LE and dual-mode BR/EDR/LE controllers. AMP controllers only
	 * need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
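
/* Illustrative only: every successful hci_dev_get() must be balanced
 * with hci_dev_put() once the reference is no longer needed:
 *
 *	hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */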

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

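/* Change the discovery state and keep the management interface in
 * sync: mgmt_discovering(hdev, 1) is emitted when discovery enters
 * DISCOVERY_FINDING, and mgmt_discovering(hdev, 0) when it stops.
 * A fall back from DISCOVERY_STARTING to DISCOVERY_STOPPED means
 * discovery never actually started, so no stop event is sent.
 */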
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

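/* Re-sort an entry within the resolve list after its RSSI changed.
 * The list is kept ordered by signal strength, strongest (smallest
 * |RSSI|) first, and an entry is never moved ahead of one whose name
 * request is already pending.
 */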
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1355
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356int hci_inquiry(void __user *arg)
1357{
1358 __u8 __user *ptr = arg;
1359 struct hci_inquiry_req ir;
1360 struct hci_dev *hdev;
1361 int err = 0, do_inquiry = 0, max_rsp;
1362 long timeo;
1363 __u8 *buf;
1364
1365 if (copy_from_user(&ir, ptr, sizeof(ir)))
1366 return -EFAULT;
1367
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001368 hdev = hci_dev_get(ir.dev_id);
1369 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 return -ENODEV;
1371
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001372 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1373 err = -EBUSY;
1374 goto done;
1375 }
1376
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001377 if (hdev->dev_type != HCI_BREDR) {
1378 err = -EOPNOTSUPP;
1379 goto done;
1380 }
1381
Johan Hedberg56f87902013-10-02 13:43:13 +03001382 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1383 err = -EOPNOTSUPP;
1384 goto done;
1385 }
1386
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001387 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001388 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001389 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001390 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 do_inquiry = 1;
1392 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001393 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
Marcel Holtmann04837f62006-07-03 10:02:33 +02001395 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001396
1397 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001398 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1399 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001400 if (err < 0)
1401 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001402
1403 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1404 * cleared). If it is interrupted by a signal, return -EINTR.
1405 */
1406 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1407 TASK_INTERRUPTIBLE))
1408 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001409 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001411	/* For an unlimited number of responses, use a buffer with
1412	 * 255 entries.
1413	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1415
1416	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1417	 * and then copy it to user space.
1418 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001419 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001420 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 err = -ENOMEM;
1422 goto done;
1423 }
1424
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001425 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001427 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428
1429 BT_DBG("num_rsp %d", ir.num_rsp);
1430
1431 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1432 ptr += sizeof(ir);
1433 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001434 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001436 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 err = -EFAULT;
1438
1439 kfree(buf);
1440
1441done:
1442 hci_dev_put(hdev);
1443 return err;
1444}
1445
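/* Bring the device up: open the underlying transport, run any
 * vendor-specific setup plus the HCI init sequence, and on success set
 * HCI_UP and notify the stack. On failure, all queues and work items
 * are flushed and the transport is closed again.
 */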
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001446static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 int ret = 0;
1449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 BT_DBG("%s %p", hdev->name, hdev);
1451
1452 hci_req_lock(hdev);
1453
Johan Hovold94324962012-03-15 14:48:41 +01001454 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1455 ret = -ENODEV;
1456 goto done;
1457 }
1458
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001459 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1460 /* Check for rfkill but allow the HCI setup stage to
1461 * proceed (which in itself doesn't cause any RF activity).
1462 */
1463 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1464 ret = -ERFKILL;
1465 goto done;
1466 }
1467
1468 /* Check for valid public address or a configured static
1469	 * random address, but let the HCI setup proceed to
1470 * be able to determine if there is a public address
1471 * or not.
1472 *
1473 * This check is only valid for BR/EDR controllers
1474 * since AMP controllers do not have an address.
1475 */
1476 if (hdev->dev_type == HCI_BREDR &&
1477 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1478 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1479 ret = -EADDRNOTAVAIL;
1480 goto done;
1481 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001482 }
1483
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 if (test_bit(HCI_UP, &hdev->flags)) {
1485 ret = -EALREADY;
1486 goto done;
1487 }
1488
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 if (hdev->open(hdev)) {
1490 ret = -EIO;
1491 goto done;
1492 }
1493
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001494 atomic_set(&hdev->cmd_cnt, 1);
1495 set_bit(HCI_INIT, &hdev->flags);
1496
1497 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1498 ret = hdev->setup(hdev);
1499
1500 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001501 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1502 set_bit(HCI_RAW, &hdev->flags);
1503
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001504 if (!test_bit(HCI_RAW, &hdev->flags) &&
1505 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001506 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 }
1508
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001509 clear_bit(HCI_INIT, &hdev->flags);
1510
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 if (!ret) {
1512 hci_dev_hold(hdev);
1513 set_bit(HCI_UP, &hdev->flags);
1514 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001515 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001516 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001517 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001518 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001519 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001520 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001521 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001522 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001524 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001525 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001526 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
1528 skb_queue_purge(&hdev->cmd_q);
1529 skb_queue_purge(&hdev->rx_q);
1530
1531 if (hdev->flush)
1532 hdev->flush(hdev);
1533
1534 if (hdev->sent_cmd) {
1535 kfree_skb(hdev->sent_cmd);
1536 hdev->sent_cmd = NULL;
1537 }
1538
1539 hdev->close(hdev);
1540 hdev->flags = 0;
1541 }
1542
1543done:
1544 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 return ret;
1546}
1547
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001548/* ---- HCI ioctl helpers ---- */
1549
1550int hci_dev_open(__u16 dev)
1551{
1552 struct hci_dev *hdev;
1553 int err;
1554
1555 hdev = hci_dev_get(dev);
1556 if (!hdev)
1557 return -ENODEV;
1558
Johan Hedberge1d08f42013-10-01 22:44:50 +03001559 /* We need to ensure that no other power on/off work is pending
1560 * before proceeding to call hci_dev_do_open. This is
1561 * particularly important if the setup procedure has not yet
1562 * completed.
1563 */
1564 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1565 cancel_delayed_work(&hdev->power_off);
1566
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001567 /* After this call it is guaranteed that the setup procedure
1568 * has finished. This means that error conditions like RFKILL
1569 * or no valid public or static random address apply.
1570 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001571 flush_workqueue(hdev->req_workqueue);
1572
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001573 err = hci_dev_do_open(hdev);
1574
1575 hci_dev_put(hdev);
1576
1577 return err;
1578}
1579
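/* Take the device down: flush pending work and timers, drop the
 * inquiry cache and all connections, optionally reset the controller
 * (HCI_QUIRK_RESET_ON_CLOSE), and close the transport.
 */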
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580static int hci_dev_do_close(struct hci_dev *hdev)
1581{
1582 BT_DBG("%s %p", hdev->name, hdev);
1583
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001584 cancel_delayed_work(&hdev->power_off);
1585
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 hci_req_cancel(hdev, ENODEV);
1587 hci_req_lock(hdev);
1588
1589 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001590 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 hci_req_unlock(hdev);
1592 return 0;
1593 }
1594
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001595 /* Flush RX and TX works */
1596 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001597 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001599 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001600 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001601 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001602 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001603 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001604 }
1605
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001606 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001607 cancel_delayed_work(&hdev->service_cache);
1608
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001609 cancel_delayed_work_sync(&hdev->le_scan_disable);
1610
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001611 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001612 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001614 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
1616 hci_notify(hdev, HCI_DEV_DOWN);
1617
1618 if (hdev->flush)
1619 hdev->flush(hdev);
1620
1621 /* Reset device */
1622 skb_queue_purge(&hdev->cmd_q);
1623 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001624 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001625 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001626 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001628 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 clear_bit(HCI_INIT, &hdev->flags);
1630 }
1631
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001632 /* flush cmd work */
1633 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634
1635 /* Drop queues */
1636 skb_queue_purge(&hdev->rx_q);
1637 skb_queue_purge(&hdev->cmd_q);
1638 skb_queue_purge(&hdev->raw_q);
1639
1640 /* Drop last sent command */
1641 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001642 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 kfree_skb(hdev->sent_cmd);
1644 hdev->sent_cmd = NULL;
1645 }
1646
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001647 kfree_skb(hdev->recv_evt);
1648 hdev->recv_evt = NULL;
1649
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 /* After this point our queues are empty
1651 * and no tasks are scheduled. */
1652 hdev->close(hdev);
1653
Johan Hedberg35b973c2013-03-15 17:06:59 -05001654 /* Clear flags */
1655 hdev->flags = 0;
1656 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1657
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001658 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1659 if (hdev->dev_type == HCI_BREDR) {
1660 hci_dev_lock(hdev);
1661 mgmt_powered(hdev, 0);
1662 hci_dev_unlock(hdev);
1663 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001664 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001665
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001666 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001667 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001668
Johan Hedberge59fda82012-02-22 18:11:53 +02001669 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001670 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 hci_req_unlock(hdev);
1673
1674 hci_dev_put(hdev);
1675 return 0;
1676}
1677
1678int hci_dev_close(__u16 dev)
1679{
1680 struct hci_dev *hdev;
1681 int err;
1682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001683 hdev = hci_dev_get(dev);
1684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001686
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001687 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1688 err = -EBUSY;
1689 goto done;
1690 }
1691
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001692 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1693 cancel_delayed_work(&hdev->power_off);
1694
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001696
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001697done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 hci_dev_put(hdev);
1699 return err;
1700}
1701
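/* HCIDEVRESET ioctl helper: drop all queued frames, flush the inquiry
 * cache and connection hash, and (unless the device is in raw mode)
 * issue an HCI Reset to the controller.
 */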
1702int hci_dev_reset(__u16 dev)
1703{
1704 struct hci_dev *hdev;
1705 int ret = 0;
1706
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001707 hdev = hci_dev_get(dev);
1708 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 return -ENODEV;
1710
1711 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
Marcel Holtmann808a0492013-08-26 20:57:58 -07001713 if (!test_bit(HCI_UP, &hdev->flags)) {
1714 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001716 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001718 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1719 ret = -EBUSY;
1720 goto done;
1721 }
1722
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 /* Drop queues */
1724 skb_queue_purge(&hdev->rx_q);
1725 skb_queue_purge(&hdev->cmd_q);
1726
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001727 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001728 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001730 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732 if (hdev->flush)
1733 hdev->flush(hdev);
1734
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001735 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001736 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737
1738 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001739 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740
1741done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 hci_req_unlock(hdev);
1743 hci_dev_put(hdev);
1744 return ret;
1745}
1746
1747int hci_dev_reset_stat(__u16 dev)
1748{
1749 struct hci_dev *hdev;
1750 int ret = 0;
1751
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001752 hdev = hci_dev_get(dev);
1753 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 return -ENODEV;
1755
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001756 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1757 ret = -EBUSY;
1758 goto done;
1759 }
1760
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1762
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001763done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 return ret;
1766}
1767
1768int hci_dev_cmd(unsigned int cmd, void __user *arg)
1769{
1770 struct hci_dev *hdev;
1771 struct hci_dev_req dr;
1772 int err = 0;
1773
1774 if (copy_from_user(&dr, arg, sizeof(dr)))
1775 return -EFAULT;
1776
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001777 hdev = hci_dev_get(dr.dev_id);
1778 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 return -ENODEV;
1780
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001781 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1782 err = -EBUSY;
1783 goto done;
1784 }
1785
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001786 if (hdev->dev_type != HCI_BREDR) {
1787 err = -EOPNOTSUPP;
1788 goto done;
1789 }
1790
Johan Hedberg56f87902013-10-02 13:43:13 +03001791 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1792 err = -EOPNOTSUPP;
1793 goto done;
1794 }
1795
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 switch (cmd) {
1797 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001798 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1799 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 break;
1801
1802 case HCISETENCRYPT:
1803 if (!lmp_encrypt_capable(hdev)) {
1804 err = -EOPNOTSUPP;
1805 break;
1806 }
1807
1808 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1809 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001810 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1811 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 if (err)
1813 break;
1814 }
1815
Johan Hedberg01178cd2013-03-05 20:37:41 +02001816 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1817 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 break;
1819
1820 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001821 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1822 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 break;
1824
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001825 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001826 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1827 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001828 break;
1829
1830 case HCISETLINKMODE:
1831 hdev->link_mode = ((__u16) dr.dev_opt) &
1832 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1833 break;
1834
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 case HCISETPTYPE:
1836 hdev->pkt_type = (__u16) dr.dev_opt;
1837 break;
1838
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001840 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1841 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 break;
1843
1844 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001845 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1846 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 break;
1848
1849 default:
1850 err = -EINVAL;
1851 break;
1852 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001853
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001854done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 hci_dev_put(hdev);
1856 return err;
1857}
1858
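/* HCIGETDEVLIST ioctl helper: copy the id/flags pairs of up to dev_num
 * registered controllers back to user space.
 */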
1859int hci_get_dev_list(void __user *arg)
1860{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001861 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 struct hci_dev_list_req *dl;
1863 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 int n = 0, size, err;
1865 __u16 dev_num;
1866
1867 if (get_user(dev_num, (__u16 __user *) arg))
1868 return -EFAULT;
1869
1870 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1871 return -EINVAL;
1872
1873 size = sizeof(*dl) + dev_num * sizeof(*dr);
1874
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001875 dl = kzalloc(size, GFP_KERNEL);
1876 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 return -ENOMEM;
1878
1879 dr = dl->dev_req;
1880
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001881 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001882 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001883 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001884 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001885
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001886 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1887 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001888
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 (dr + n)->dev_id = hdev->id;
1890 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001891
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 if (++n >= dev_num)
1893 break;
1894 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001895 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
1897 dl->dev_num = n;
1898 size = sizeof(*dl) + n * sizeof(*dr);
1899
1900 err = copy_to_user(arg, dl, size);
1901 kfree(dl);
1902
1903 return err ? -EFAULT : 0;
1904}
1905
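/* HCIGETDEVINFO ioctl helper: fill in a struct hci_dev_info snapshot
 * (address, type, flags, MTUs and statistics) for one controller.
 */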
1906int hci_get_dev_info(void __user *arg)
1907{
1908 struct hci_dev *hdev;
1909 struct hci_dev_info di;
1910 int err = 0;
1911
1912 if (copy_from_user(&di, arg, sizeof(di)))
1913 return -EFAULT;
1914
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001915 hdev = hci_dev_get(di.dev_id);
1916 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 return -ENODEV;
1918
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001919 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001920 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001921
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001922 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1923 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001924
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 strcpy(di.name, hdev->name);
1926 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001927 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 di.flags = hdev->flags;
1929 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001930 if (lmp_bredr_capable(hdev)) {
1931 di.acl_mtu = hdev->acl_mtu;
1932 di.acl_pkts = hdev->acl_pkts;
1933 di.sco_mtu = hdev->sco_mtu;
1934 di.sco_pkts = hdev->sco_pkts;
1935 } else {
1936 di.acl_mtu = hdev->le_mtu;
1937 di.acl_pkts = hdev->le_pkts;
1938 di.sco_mtu = 0;
1939 di.sco_pkts = 0;
1940 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 di.link_policy = hdev->link_policy;
1942 di.link_mode = hdev->link_mode;
1943
1944 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1945 memcpy(&di.features, &hdev->features, sizeof(di.features));
1946
1947 if (copy_to_user(arg, &di, sizeof(di)))
1948 err = -EFAULT;
1949
1950 hci_dev_put(hdev);
1951
1952 return err;
1953}
1954
1955/* ---- Interface to HCI drivers ---- */
1956
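/* rfkill callback: power the device down when its radio gets blocked,
 * except while the setup stage is still running.
 */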
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001957static int hci_rfkill_set_block(void *data, bool blocked)
1958{
1959 struct hci_dev *hdev = data;
1960
1961 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1962
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001963 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1964 return -EBUSY;
1965
Johan Hedberg5e130362013-09-13 08:58:17 +03001966 if (blocked) {
1967 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001968 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1969 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001970 } else {
1971 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001972 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001973
1974 return 0;
1975}
1976
1977static const struct rfkill_ops hci_rfkill_ops = {
1978 .set_block = hci_rfkill_set_block,
1979};
1980
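/* Deferred power-on work: open the device and re-check the error
 * conditions that were ignored during setup (rfkill, missing public or
 * static address); if one still applies, power the device back off.
 */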
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001981static void hci_power_on(struct work_struct *work)
1982{
1983 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001984 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001985
1986 BT_DBG("%s", hdev->name);
1987
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001988 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001989 if (err < 0) {
1990 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001991 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001992 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001993
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001994 /* During the HCI setup phase, a few error conditions are
1995 * ignored and they need to be checked now. If they are still
1996 * valid, it is important to turn the device back off.
1997 */
1998 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1999 (hdev->dev_type == HCI_BREDR &&
2000 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2001 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002002 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2003 hci_dev_do_close(hdev);
2004 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002005 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2006 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002007 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002008
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002009 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002010 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002011}
2012
2013static void hci_power_off(struct work_struct *work)
2014{
Johan Hedberg32435532011-11-07 22:16:04 +02002015 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002016 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002017
2018 BT_DBG("%s", hdev->name);
2019
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002020 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002021}
2022
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002023static void hci_discov_off(struct work_struct *work)
2024{
2025 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002026
2027 hdev = container_of(work, struct hci_dev, discov_off.work);
2028
2029 BT_DBG("%s", hdev->name);
2030
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002031 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002032}
2033
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002034int hci_uuids_clear(struct hci_dev *hdev)
2035{
Johan Hedberg48210022013-01-27 00:31:28 +02002036 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002037
Johan Hedberg48210022013-01-27 00:31:28 +02002038 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2039 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002040 kfree(uuid);
2041 }
2042
2043 return 0;
2044}
2045
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002046int hci_link_keys_clear(struct hci_dev *hdev)
2047{
2048 struct list_head *p, *n;
2049
2050 list_for_each_safe(p, n, &hdev->link_keys) {
2051 struct link_key *key;
2052
2053 key = list_entry(p, struct link_key, list);
2054
2055 list_del(p);
2056 kfree(key);
2057 }
2058
2059 return 0;
2060}
2061
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002062int hci_smp_ltks_clear(struct hci_dev *hdev)
2063{
2064 struct smp_ltk *k, *tmp;
2065
2066 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2067 list_del(&k->list);
2068 kfree(k);
2069 }
2070
2071 return 0;
2072}
2073
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002074struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2075{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002076 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002077
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002078 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002079 if (bacmp(bdaddr, &k->bdaddr) == 0)
2080 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002081
2082 return NULL;
2083}
2084
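/* Decide whether a link key should be stored permanently or discarded
 * when the connection drops, based on the key type and the bonding
 * requirements of both sides.
 */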
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302085static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002086 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002087{
2088 /* Legacy key */
2089 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302090 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002091
2092 /* Debug keys are insecure so don't store them persistently */
2093 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302094 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002095
2096 /* Changed combination key and there's no previous one */
2097 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302098 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002099
2100 /* Security mode 3 case */
2101 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302102 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002103
2104 /* Neither local nor remote side had no-bonding as requirement */
2105 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302106 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002107
2108 /* Local side had dedicated bonding as requirement */
2109 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302110 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002111
2112 /* Remote side had dedicated bonding as requirement */
2113 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302114 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002115
2116 /* If none of the above criteria match, then don't store the key
2117 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302118 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002119}
2120
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002121struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002122{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002123 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002124
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002125 list_for_each_entry(k, &hdev->long_term_keys, list) {
2126 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002127 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002128 continue;
2129
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002130 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002131 }
2132
2133 return NULL;
2134}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002135
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002136struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002137 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002138{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002139 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002140
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002141 list_for_each_entry(k, &hdev->long_term_keys, list)
2142 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002143 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002144 return k;
2145
2146 return NULL;
2147}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002148
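/* Store a new or updated BR/EDR link key for bdaddr and, for new keys,
 * tell the management interface whether it should be kept persistent.
 */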
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002149int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002150 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002151{
2152 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302153 u8 old_key_type;
2154 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002155
2156 old_key = hci_find_link_key(hdev, bdaddr);
2157 if (old_key) {
2158 old_key_type = old_key->type;
2159 key = old_key;
2160 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002161 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002162 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2163 if (!key)
2164 return -ENOMEM;
2165 list_add(&key->list, &hdev->link_keys);
2166 }
2167
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002168 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002169
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002170 /* Some buggy controller combinations generate a changed
2171 * combination key for legacy pairing even when there's no
2172 * previous key */
2173 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002174 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002175 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002176 if (conn)
2177 conn->key_type = type;
2178 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002179
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002180 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002181 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002182 key->pin_len = pin_len;
2183
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002184 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002185 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002186 else
2187 key->type = type;
2188
Johan Hedberg4df378a2011-04-28 11:29:03 -07002189 if (!new_key)
2190 return 0;
2191
2192 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2193
Johan Hedberg744cf192011-11-08 20:40:14 +02002194 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002195
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302196 if (conn)
2197 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002198
2199 return 0;
2200}
2201
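/* Store a new or updated LE key (STK or LTK) together with the EDiv,
 * Rand and encryption size needed to use it later; new LTKs are
 * reported to the management interface.
 */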
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002202int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002203 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002204 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002205{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002206 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002207
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002208 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2209 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002210
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002211 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2212 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002213 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002214 else {
2215 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002216 if (!key)
2217 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002218 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002219 }
2220
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002221 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002222 key->bdaddr_type = addr_type;
2223 memcpy(key->val, tk, sizeof(key->val));
2224 key->authenticated = authenticated;
2225 key->ediv = ediv;
2226 key->enc_size = enc_size;
2227 key->type = type;
2228 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002229
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002230 if (!new_key)
2231 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002232
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002233 if (type & HCI_SMP_LTK)
2234 mgmt_new_ltk(hdev, key, 1);
2235
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002236 return 0;
2237}
2238
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002239int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2240{
2241 struct link_key *key;
2242
2243 key = hci_find_link_key(hdev, bdaddr);
2244 if (!key)
2245 return -ENOENT;
2246
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002247 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002248
2249 list_del(&key->list);
2250 kfree(key);
2251
2252 return 0;
2253}
2254
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002255int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2256{
2257 struct smp_ltk *k, *tmp;
2258
2259 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2260 if (bacmp(bdaddr, &k->bdaddr))
2261 continue;
2262
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002263 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002264
2265 list_del(&k->list);
2266 kfree(k);
2267 }
2268
2269 return 0;
2270}
2271
Ville Tervo6bd32322011-02-16 16:32:41 +02002272/* HCI command timer function: fires when the controller has not
 * responded to the last command in time */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002273static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002274{
2275 struct hci_dev *hdev = (void *) arg;
2276
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002277 if (hdev->sent_cmd) {
2278 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2279 u16 opcode = __le16_to_cpu(sent->opcode);
2280
2281 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2282 } else {
2283 BT_ERR("%s command tx timeout", hdev->name);
2284 }
2285
Ville Tervo6bd32322011-02-16 16:32:41 +02002286 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002287 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002288}
2289
Szymon Janc2763eda2011-03-22 13:12:22 +01002290struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002291 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002292{
2293 struct oob_data *data;
2294
2295 list_for_each_entry(data, &hdev->remote_oob_data, list)
2296 if (bacmp(bdaddr, &data->bdaddr) == 0)
2297 return data;
2298
2299 return NULL;
2300}
2301
2302int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2303{
2304 struct oob_data *data;
2305
2306 data = hci_find_remote_oob_data(hdev, bdaddr);
2307 if (!data)
2308 return -ENOENT;
2309
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002310 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002311
2312 list_del(&data->list);
2313 kfree(data);
2314
2315 return 0;
2316}
2317
2318int hci_remote_oob_data_clear(struct hci_dev *hdev)
2319{
2320 struct oob_data *data, *n;
2321
2322 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2323 list_del(&data->list);
2324 kfree(data);
2325 }
2326
2327 return 0;
2328}
2329
2330int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002331 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002332{
2333 struct oob_data *data;
2334
2335 data = hci_find_remote_oob_data(hdev, bdaddr);
2336
2337 if (!data) {
2338 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2339 if (!data)
2340 return -ENOMEM;
2341
2342 bacpy(&data->bdaddr, bdaddr);
2343 list_add(&data->list, &hdev->remote_oob_data);
2344 }
2345
2346 memcpy(data->hash, hash, sizeof(data->hash));
2347 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2348
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002349 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002350
2351 return 0;
2352}
2353
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002354struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2355 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002356{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002357 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002358
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002359 list_for_each_entry(b, &hdev->blacklist, list) {
2360 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002361 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002362 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002363
2364 return NULL;
2365}
2366
2367int hci_blacklist_clear(struct hci_dev *hdev)
2368{
2369 struct list_head *p, *n;
2370
2371 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002372 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002373
2374 list_del(p);
2375 kfree(b);
2376 }
2377
2378 return 0;
2379}
2380
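/* Add a device (address plus address type) to the blacklist and notify
 * the management interface; fails with -EEXIST if already present.
 */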
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002381int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002382{
2383 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002384
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002385 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002386 return -EBADF;
2387
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002388 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002389 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002390
2391 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002392 if (!entry)
2393 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002394
2395 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002396 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002397
2398 list_add(&entry->list, &hdev->blacklist);
2399
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002400 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002401}
2402
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002403int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002404{
2405 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002406
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002407 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002408 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002409
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002410 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002411 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002412 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002413
2414 list_del(&entry->list);
2415 kfree(entry);
2416
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002417 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002418}
2419
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002420static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002421{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002422 if (status) {
2423 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002424
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002425 hci_dev_lock(hdev);
2426 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2427 hci_dev_unlock(hdev);
2428 return;
2429 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002430}
2431
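/* Runs once the LE scan has been disabled. For LE-only discovery this
 * ends the procedure; for interleaved discovery it starts the BR/EDR
 * inquiry phase.
 */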
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002432static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002433{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002434 /* General inquiry access code (GIAC) */
2435 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2436 struct hci_request req;
2437 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002438 int err;
2439
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002440 if (status) {
2441 BT_ERR("Failed to disable LE scanning: status %d", status);
2442 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002443 }
2444
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002445 switch (hdev->discovery.type) {
2446 case DISCOV_TYPE_LE:
2447 hci_dev_lock(hdev);
2448 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2449 hci_dev_unlock(hdev);
2450 break;
2451
2452 case DISCOV_TYPE_INTERLEAVED:
2453 hci_req_init(&req, hdev);
2454
2455 memset(&cp, 0, sizeof(cp));
2456 memcpy(&cp.lap, lap, sizeof(cp.lap));
2457 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2458 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2459
2460 hci_dev_lock(hdev);
2461
2462 hci_inquiry_cache_flush(hdev);
2463
2464 err = hci_req_run(&req, inquiry_complete);
2465 if (err) {
2466 BT_ERR("Inquiry request failed: err %d", err);
2467 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2468 }
2469
2470 hci_dev_unlock(hdev);
2471 break;
2472 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002473}
2474
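/* Delayed work that stops an ongoing LE scan by sending LE Set Scan
 * Enable with the disable parameter.
 */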
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002475static void le_scan_disable_work(struct work_struct *work)
2476{
2477 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002478 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002479 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002480 struct hci_request req;
2481 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002482
2483 BT_DBG("%s", hdev->name);
2484
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002485 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002486
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002487 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002488 cp.enable = LE_SCAN_DISABLE;
2489 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002490
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002491 err = hci_req_run(&req, le_scan_disable_work_complete);
2492 if (err)
2493 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002494}
2495
David Herrmann9be0dab2012-04-22 14:39:57 +02002496/* Alloc HCI device */
2497struct hci_dev *hci_alloc_dev(void)
2498{
2499 struct hci_dev *hdev;
2500
2501 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2502 if (!hdev)
2503 return NULL;
2504
David Herrmannb1b813d2012-04-22 14:39:58 +02002505 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2506 hdev->esco_type = (ESCO_HV1);
2507 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002508 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2509 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002510 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2511 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002512
David Herrmannb1b813d2012-04-22 14:39:58 +02002513 hdev->sniff_max_interval = 800;
2514 hdev->sniff_min_interval = 80;
2515
Marcel Holtmannbef64732013-10-11 08:23:19 -07002516 hdev->le_scan_interval = 0x0060;
2517 hdev->le_scan_window = 0x0030;
2518
David Herrmannb1b813d2012-04-22 14:39:58 +02002519 mutex_init(&hdev->lock);
2520 mutex_init(&hdev->req_lock);
2521
2522 INIT_LIST_HEAD(&hdev->mgmt_pending);
2523 INIT_LIST_HEAD(&hdev->blacklist);
2524 INIT_LIST_HEAD(&hdev->uuids);
2525 INIT_LIST_HEAD(&hdev->link_keys);
2526 INIT_LIST_HEAD(&hdev->long_term_keys);
2527 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002528 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002529
2530 INIT_WORK(&hdev->rx_work, hci_rx_work);
2531 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2532 INIT_WORK(&hdev->tx_work, hci_tx_work);
2533 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002534
David Herrmannb1b813d2012-04-22 14:39:58 +02002535 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2536 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2537 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2538
David Herrmannb1b813d2012-04-22 14:39:58 +02002539 skb_queue_head_init(&hdev->rx_q);
2540 skb_queue_head_init(&hdev->cmd_q);
2541 skb_queue_head_init(&hdev->raw_q);
2542
2543 init_waitqueue_head(&hdev->req_wait_q);
2544
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002545 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002546
David Herrmannb1b813d2012-04-22 14:39:58 +02002547 hci_init_sysfs(hdev);
2548 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002549
2550 return hdev;
2551}
2552EXPORT_SYMBOL(hci_alloc_dev);
2553
2554/* Free HCI device */
2555void hci_free_dev(struct hci_dev *hdev)
2556{
David Herrmann9be0dab2012-04-22 14:39:57 +02002557 /* will free via device release */
2558 put_device(&hdev->dev);
2559}
2560EXPORT_SYMBOL(hci_free_dev);
2561
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562/* Register HCI device */
2563int hci_register_dev(struct hci_dev *hdev)
2564{
David Herrmannb1b813d2012-04-22 14:39:58 +02002565 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566
David Herrmann010666a2012-01-07 15:47:07 +01002567 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 return -EINVAL;
2569
Mat Martineau08add512011-11-02 16:18:36 -07002570 /* Do not allow HCI_AMP devices to register at index 0,
2571 * so the index can be used as the AMP controller ID.
2572 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002573 switch (hdev->dev_type) {
2574 case HCI_BREDR:
2575 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2576 break;
2577 case HCI_AMP:
2578 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2579 break;
2580 default:
2581 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002583
Sasha Levin3df92b32012-05-27 22:36:56 +02002584 if (id < 0)
2585 return id;
2586
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 sprintf(hdev->name, "hci%d", id);
2588 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002589
2590 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2591
Kees Cookd8537542013-07-03 15:04:57 -07002592 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2593 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002594 if (!hdev->workqueue) {
2595 error = -ENOMEM;
2596 goto err;
2597 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002598
Kees Cookd8537542013-07-03 15:04:57 -07002599 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2600 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002601 if (!hdev->req_workqueue) {
2602 destroy_workqueue(hdev->workqueue);
2603 error = -ENOMEM;
2604 goto err;
2605 }
2606
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002607 if (!IS_ERR_OR_NULL(bt_debugfs))
2608 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2609
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002610 dev_set_name(&hdev->dev, "%s", hdev->name);
2611
2612 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002613 if (error < 0)
2614 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002616 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002617 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2618 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002619 if (hdev->rfkill) {
2620 if (rfkill_register(hdev->rfkill) < 0) {
2621 rfkill_destroy(hdev->rfkill);
2622 hdev->rfkill = NULL;
2623 }
2624 }
2625
Johan Hedberg5e130362013-09-13 08:58:17 +03002626 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2627 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2628
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002629 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002630 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002631
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002632 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002633 /* Assume BR/EDR support until proven otherwise (such as
2634 * through reading supported features during init.
2635 */
2636 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2637 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002638
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002639 write_lock(&hci_dev_list_lock);
2640 list_add(&hdev->list, &hci_dev_list);
2641 write_unlock(&hci_dev_list_lock);
2642
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002644 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645
Johan Hedberg19202572013-01-14 22:33:51 +02002646 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002647
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002649
David Herrmann33ca9542011-10-08 14:58:49 +02002650err_wqueue:
2651 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002652 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002653err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002654 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002655
David Herrmann33ca9542011-10-08 14:58:49 +02002656 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657}
2658EXPORT_SYMBOL(hci_register_dev);
2659
2660/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002661void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662{
Sasha Levin3df92b32012-05-27 22:36:56 +02002663 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002664
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002665 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666
Johan Hovold94324962012-03-15 14:48:41 +01002667 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2668
Sasha Levin3df92b32012-05-27 22:36:56 +02002669 id = hdev->id;
2670
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002671 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002673 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674
2675 hci_dev_do_close(hdev);
2676
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302677 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002678 kfree_skb(hdev->reassembly[i]);
2679
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002680 cancel_work_sync(&hdev->power_on);
2681
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002682 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002683 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002684 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002685 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002686 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002687 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002688
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002689 /* mgmt_index_removed should take care of emptying the
2690 * pending list */
2691 BUG_ON(!list_empty(&hdev->mgmt_pending));
2692
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 hci_notify(hdev, HCI_DEV_UNREG);
2694
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002695 if (hdev->rfkill) {
2696 rfkill_unregister(hdev->rfkill);
2697 rfkill_destroy(hdev->rfkill);
2698 }
2699
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002700 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002701
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002702 debugfs_remove_recursive(hdev->debugfs);
2703
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002704 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002705 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002706
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002707 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002708 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002709 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002710 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002711 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002712 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002713 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002714
David Herrmanndc946bd2012-01-07 15:47:24 +01002715 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002716
2717 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718}
2719EXPORT_SYMBOL(hci_unregister_dev);
2720
2721/* Suspend HCI device */
2722int hci_suspend_dev(struct hci_dev *hdev)
2723{
2724 hci_notify(hdev, HCI_DEV_SUSPEND);
2725 return 0;
2726}
2727EXPORT_SYMBOL(hci_suspend_dev);
2728
2729/* Resume HCI device */
2730int hci_resume_dev(struct hci_dev *hdev)
2731{
2732 hci_notify(hdev, HCI_DEV_RESUME);
2733 return 0;
2734}
2735EXPORT_SYMBOL(hci_resume_dev);
2736
Marcel Holtmann76bca882009-11-18 00:40:39 +01002737/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002738int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002739{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002740 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002741 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002742 kfree_skb(skb);
2743 return -ENXIO;
2744 }
2745
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002746 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002747 bt_cb(skb)->incoming = 1;
2748
2749 /* Time stamp */
2750 __net_timestamp(skb);
2751
Marcel Holtmann76bca882009-11-18 00:40:39 +01002752 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002753 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002754
Marcel Holtmann76bca882009-11-18 00:40:39 +01002755 return 0;
2756}
2757EXPORT_SYMBOL(hci_recv_frame);
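
/* Illustrative sketch: a driver that receives a complete HCI event in a
 * hardware buffer wraps it in an skb, tags the packet type and hands it
 * to hci_recv_frame(), which consumes the skb. my_rx_event() is a
 * hypothetical name.
 */
#if 0
static int my_rx_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(hdev, skb);
}
#endif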
2758
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302759static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002760 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302761{
2762 int len = 0;
2763 int hlen = 0;
2764 int remain = count;
2765 struct sk_buff *skb;
2766 struct bt_skb_cb *scb;
2767
2768 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002769 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302770 return -EILSEQ;
2771
2772 skb = hdev->reassembly[index];
2773
2774 if (!skb) {
2775 switch (type) {
2776 case HCI_ACLDATA_PKT:
2777 len = HCI_MAX_FRAME_SIZE;
2778 hlen = HCI_ACL_HDR_SIZE;
2779 break;
2780 case HCI_EVENT_PKT:
2781 len = HCI_MAX_EVENT_SIZE;
2782 hlen = HCI_EVENT_HDR_SIZE;
2783 break;
2784 case HCI_SCODATA_PKT:
2785 len = HCI_MAX_SCO_SIZE;
2786 hlen = HCI_SCO_HDR_SIZE;
2787 break;
2788 }
2789
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002790 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302791 if (!skb)
2792 return -ENOMEM;
2793
2794 scb = (void *) skb->cb;
2795 scb->expect = hlen;
2796 scb->pkt_type = type;
2797
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302798 hdev->reassembly[index] = skb;
2799 }
2800
2801 while (count) {
2802 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002803 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302804
2805 memcpy(skb_put(skb, len), data, len);
2806
2807 count -= len;
2808 data += len;
2809 scb->expect -= len;
2810 remain = count;
2811
2812 switch (type) {
2813 case HCI_EVENT_PKT:
2814 if (skb->len == HCI_EVENT_HDR_SIZE) {
2815 struct hci_event_hdr *h = hci_event_hdr(skb);
2816 scb->expect = h->plen;
2817
2818 if (skb_tailroom(skb) < scb->expect) {
2819 kfree_skb(skb);
2820 hdev->reassembly[index] = NULL;
2821 return -ENOMEM;
2822 }
2823 }
2824 break;
2825
2826 case HCI_ACLDATA_PKT:
2827 if (skb->len == HCI_ACL_HDR_SIZE) {
2828 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2829 scb->expect = __le16_to_cpu(h->dlen);
2830
2831 if (skb_tailroom(skb) < scb->expect) {
2832 kfree_skb(skb);
2833 hdev->reassembly[index] = NULL;
2834 return -ENOMEM;
2835 }
2836 }
2837 break;
2838
2839 case HCI_SCODATA_PKT:
2840 if (skb->len == HCI_SCO_HDR_SIZE) {
2841 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2842 scb->expect = h->dlen;
2843
2844 if (skb_tailroom(skb) < scb->expect) {
2845 kfree_skb(skb);
2846 hdev->reassembly[index] = NULL;
2847 return -ENOMEM;
2848 }
2849 }
2850 break;
2851 }
2852
2853 if (scb->expect == 0) {
2854 /* Complete frame */
2855
2856 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002857 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302858
2859 hdev->reassembly[index] = NULL;
2860 return remain;
2861 }
2862 }
2863
2864 return remain;
2865}
2866
Marcel Holtmannef222012007-07-11 06:42:04 +02002867int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2868{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302869 int rem = 0;
2870
Marcel Holtmannef222012007-07-11 06:42:04 +02002871 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2872 return -EILSEQ;
2873
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002874 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002875 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302876 if (rem < 0)
2877 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002878
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302879 data += (count - rem);
2880 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002881 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002882
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302883 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002884}
2885EXPORT_SYMBOL(hci_recv_fragment);
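
/* Illustrative sketch: a driver whose hardware delivers one packet type
 * in arbitrarily sized chunks (event bytes from an interrupt endpoint,
 * say) can feed each chunk to hci_recv_fragment() and let the core
 * reassemble; the completion handler shown is hypothetical.
 */
#if 0
static void my_intr_complete(struct hci_dev *hdev, void *data, int count)
{
	int err = hci_recv_fragment(hdev, HCI_EVENT_PKT, data, count);

	if (err < 0)
		BT_ERR("%s event reassembly failed (%d)", hdev->name, err);
}
#endif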
2886
Suraj Sumangala99811512010-07-14 13:02:19 +05302887#define STREAM_REASSEMBLY 0
2888
2889int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2890{
2891 int type;
2892 int rem = 0;
2893
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002894 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302895 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2896
2897 if (!skb) {
2898 struct { char type; } *pkt;
2899
2900 /* Start of the frame */
2901 pkt = data;
2902 type = pkt->type;
2903
2904 data++;
2905 count--;
2906 } else
2907 type = bt_cb(skb)->pkt_type;
2908
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002909 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002910 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302911 if (rem < 0)
2912 return rem;
2913
2914 data += (count - rem);
2915 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002916 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302917
2918 return rem;
2919}
2920EXPORT_SYMBOL(hci_recv_stream_fragment);
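
/* Illustrative sketch: a UART-style driver sees a raw H:4 byte stream in
 * which every packet starts with its type octet; it can push the bytes
 * exactly as they arrive and the stream reassembler recovers the packet
 * boundaries. The receive callback shown is hypothetical.
 */
#if 0
static void my_uart_rx(struct hci_dev *hdev, void *data, int count)
{
	if (hci_recv_stream_fragment(hdev, data, count) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}
#endif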
2921
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922/* ---- Interface to upper protocols ---- */
2923
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924int hci_register_cb(struct hci_cb *cb)
2925{
2926 BT_DBG("%p name %s", cb, cb->name);
2927
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002928 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002930 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
2932 return 0;
2933}
2934EXPORT_SYMBOL(hci_register_cb);
2935
2936int hci_unregister_cb(struct hci_cb *cb)
2937{
2938 BT_DBG("%p name %s", cb, cb->name);
2939
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002940 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002942 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943
2944 return 0;
2945}
2946EXPORT_SYMBOL(hci_unregister_cb);
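
/* Illustrative sketch: an upper protocol registers an hci_cb to hear
 * about security events, assuming the callback set of this era
 * (security_cfm and friends); the my_proto names are hypothetical.
 */
#if 0
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	/* react to an authentication/encryption change on conn */
}

static struct hci_cb my_proto_cb = {
	.name		= "my_proto",
	.security_cfm	= my_security_cfm,
};

static int __init my_proto_init(void)
{
	return hci_register_cb(&my_proto_cb);
}

static void __exit my_proto_exit(void)
{
	hci_unregister_cb(&my_proto_cb);
}
#endif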
2947
Marcel Holtmann51086992013-10-10 14:54:19 -07002948static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002950 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002952 /* Time stamp */
2953 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002955 /* Send copy to monitor */
2956 hci_send_to_monitor(hdev, skb);
2957
2958 if (atomic_read(&hdev->promisc)) {
2959 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002960 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 }
2962
2963 /* Get rid of skb owner, prior to sending to the driver. */
2964 skb_orphan(skb);
2965
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07002966 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07002967 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968}
2969
Johan Hedberg3119ae92013-03-05 20:37:44 +02002970void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2971{
2972 skb_queue_head_init(&req->cmd_q);
2973 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002974 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002975}
2976
2977int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2978{
2979 struct hci_dev *hdev = req->hdev;
2980 struct sk_buff *skb;
2981 unsigned long flags;
2982
2983 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2984
Andre Guedes5d73e032013-03-08 11:20:16 -03002985	/* If an error occurred during request building, remove all HCI
2986 * commands queued on the HCI request queue.
2987 */
2988 if (req->err) {
2989 skb_queue_purge(&req->cmd_q);
2990 return req->err;
2991 }
2992
Johan Hedberg3119ae92013-03-05 20:37:44 +02002993 /* Do not allow empty requests */
2994 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002995 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002996
2997 skb = skb_peek_tail(&req->cmd_q);
2998 bt_cb(skb)->req.complete = complete;
2999
3000 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3001 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3002 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3003
3004 queue_work(hdev->workqueue, &hdev->cmd_work);
3005
3006 return 0;
3007}
3008
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003009static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003010 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011{
3012 int len = HCI_COMMAND_HDR_SIZE + plen;
3013 struct hci_command_hdr *hdr;
3014 struct sk_buff *skb;
3015
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003017 if (!skb)
3018 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019
3020 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003021 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 hdr->plen = plen;
3023
3024 if (plen)
3025 memcpy(skb_put(skb, plen), param, plen);
3026
3027 BT_DBG("skb len %d", skb->len);
3028
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003029 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003030
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003031 return skb;
3032}
3033
3034/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003035int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3036 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003037{
3038 struct sk_buff *skb;
3039
3040 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3041
3042 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3043 if (!skb) {
3044 BT_ERR("%s no memory for command", hdev->name);
3045 return -ENOMEM;
3046 }
3047
Johan Hedberg11714b32013-03-05 20:37:47 +02003048	/* Stand-alone HCI commands must be flagged as
3049 * single-command requests.
3050 */
3051 bt_cb(skb)->req.start = true;
3052
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003054 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055
3056 return 0;
3057}
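
/* Illustrative sketch: queueing a single stand-alone command, here Write
 * Scan Enable to make the controller connectable and discoverable (the
 * same pattern the management core uses); my_set_scan() is hypothetical.
 */
#if 0
static int my_set_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
#endif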
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058
Johan Hedberg71c76a12013-03-05 20:37:46 +02003059/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003060void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3061 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003062{
3063 struct hci_dev *hdev = req->hdev;
3064 struct sk_buff *skb;
3065
3066 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3067
Andre Guedes34739c12013-03-08 11:20:18 -03003068	/* If an error occurred during request building, there is no point in
3069 * queueing the HCI command. We can simply return.
3070 */
3071 if (req->err)
3072 return;
3073
Johan Hedberg71c76a12013-03-05 20:37:46 +02003074 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3075 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003076 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3077 hdev->name, opcode);
3078 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003079 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003080 }
3081
3082 if (skb_queue_empty(&req->cmd_q))
3083 bt_cb(skb)->req.start = true;
3084
Johan Hedberg02350a72013-04-03 21:50:29 +03003085 bt_cb(skb)->req.event = event;
3086
Johan Hedberg71c76a12013-03-05 20:37:46 +02003087 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003088}
3089
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003090void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3091 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003092{
3093 hci_req_add_ev(req, opcode, plen, param, 0);
3094}
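
/* Illustrative sketch: building and running an asynchronous request. The
 * parameter buffer is copied into the command skb immediately, so stack
 * variables are safe, and the complete callback fires once after the
 * last command; my_write_settings() and my_req_complete() are
 * hypothetical.
 */
#if 0
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int my_write_settings(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	/* further hci_req_add() calls may be chained here */

	return hci_req_run(&req, my_req_complete);
}
#endif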
3095
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003097void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098{
3099 struct hci_command_hdr *hdr;
3100
3101 if (!hdev->sent_cmd)
3102 return NULL;
3103
3104 hdr = (void *) hdev->sent_cmd->data;
3105
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003106 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107 return NULL;
3108
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003109 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110
3111 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3112}
3113
3114/* Send ACL data */
3115static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3116{
3117 struct hci_acl_hdr *hdr;
3118 int len = skb->len;
3119
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003120 skb_push(skb, HCI_ACL_HDR_SIZE);
3121 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003122 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003123 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3124 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125}
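
/* The header built above packs the 12-bit connection handle and the
 * 4-bit packet boundary/broadcast flags into one 16-bit field:
 * hci_handle_pack(handle, flags) yields (handle & 0x0fff) | (flags << 12),
 * so, for example, handle 0x002a sent with ACL_START (0x02) goes on the
 * wire as 0x202a.
 */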
3126
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003127static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003128 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003130 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131 struct hci_dev *hdev = conn->hdev;
3132 struct sk_buff *list;
3133
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003134 skb->len = skb_headlen(skb);
3135 skb->data_len = 0;
3136
3137 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003138
3139 switch (hdev->dev_type) {
3140 case HCI_BREDR:
3141 hci_add_acl_hdr(skb, conn->handle, flags);
3142 break;
3143 case HCI_AMP:
3144 hci_add_acl_hdr(skb, chan->handle, flags);
3145 break;
3146 default:
3147 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3148 return;
3149 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003150
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003151 list = skb_shinfo(skb)->frag_list;
3152 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153		/* Non-fragmented */
3154 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3155
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003156 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 } else {
3158 /* Fragmented */
3159 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3160
3161 skb_shinfo(skb)->frag_list = NULL;
3162
3163 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003164 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003166 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003167
3168 flags &= ~ACL_START;
3169 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170 do {
3171 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003172
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003173 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003174 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175
3176 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3177
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003178 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 } while (list);
3180
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003181 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003183}
3184
3185void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3186{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003187 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003188
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003189 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003190
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003191 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003193 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195
3196/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003197void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198{
3199 struct hci_dev *hdev = conn->hdev;
3200 struct hci_sco_hdr hdr;
3201
3202 BT_DBG("%s len %d", hdev->name, skb->len);
3203
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003204 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 hdr.dlen = skb->len;
3206
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003207 skb_push(skb, HCI_SCO_HDR_SIZE);
3208 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003209 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003211 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003212
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003214 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216
3217/* ---- HCI TX task (outgoing data) ---- */
3218
3219/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003220static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3221 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222{
3223 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003224 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003225 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003227 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003229
3230 rcu_read_lock();
3231
3232 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003233 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003235
3236 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3237 continue;
3238
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 num++;
3240
3241 if (c->sent < min) {
3242 min = c->sent;
3243 conn = c;
3244 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003245
3246 if (hci_conn_num(hdev, type) == num)
3247 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 }
3249
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003250 rcu_read_unlock();
3251
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003253 int cnt, q;
3254
3255 switch (conn->type) {
3256 case ACL_LINK:
3257 cnt = hdev->acl_cnt;
3258 break;
3259 case SCO_LINK:
3260 case ESCO_LINK:
3261 cnt = hdev->sco_cnt;
3262 break;
3263 case LE_LINK:
3264 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3265 break;
3266 default:
3267 cnt = 0;
3268 BT_ERR("Unknown link type");
3269 }
3270
3271 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 *quote = q ? q : 1;
3273 } else
3274 *quote = 0;
3275
3276 BT_DBG("conn %p quote %d", conn, *quote);
3277 return conn;
3278}
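
/* For example, with hdev->acl_cnt == 8 and num == 3 connections ready to
 * send, the least-used connection wins with a quote of 8 / 3 == 2
 * packets; when cnt < num the quote is clamped to 1 so the winner can
 * still make progress.
 */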
3279
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003280static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281{
3282 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003283 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284
Ville Tervobae1f5d92011-02-10 22:38:53 -03003285 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003287 rcu_read_lock();
3288
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003290 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003291 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003292 BT_ERR("%s killing stalled connection %pMR",
3293 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003294 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 }
3296 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003297
3298 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299}
3300
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003301static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3302 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003303{
3304 struct hci_conn_hash *h = &hdev->conn_hash;
3305 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003306 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003307 struct hci_conn *conn;
3308 int cnt, q, conn_num = 0;
3309
3310 BT_DBG("%s", hdev->name);
3311
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003312 rcu_read_lock();
3313
3314 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003315 struct hci_chan *tmp;
3316
3317 if (conn->type != type)
3318 continue;
3319
3320 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3321 continue;
3322
3323 conn_num++;
3324
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003325 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003326 struct sk_buff *skb;
3327
3328 if (skb_queue_empty(&tmp->data_q))
3329 continue;
3330
3331 skb = skb_peek(&tmp->data_q);
3332 if (skb->priority < cur_prio)
3333 continue;
3334
3335 if (skb->priority > cur_prio) {
3336 num = 0;
3337 min = ~0;
3338 cur_prio = skb->priority;
3339 }
3340
3341 num++;
3342
3343 if (conn->sent < min) {
3344 min = conn->sent;
3345 chan = tmp;
3346 }
3347 }
3348
3349 if (hci_conn_num(hdev, type) == conn_num)
3350 break;
3351 }
3352
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003353 rcu_read_unlock();
3354
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003355 if (!chan)
3356 return NULL;
3357
3358 switch (chan->conn->type) {
3359 case ACL_LINK:
3360 cnt = hdev->acl_cnt;
3361 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003362 case AMP_LINK:
3363 cnt = hdev->block_cnt;
3364 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003365 case SCO_LINK:
3366 case ESCO_LINK:
3367 cnt = hdev->sco_cnt;
3368 break;
3369 case LE_LINK:
3370 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3371 break;
3372 default:
3373 cnt = 0;
3374 BT_ERR("Unknown link type");
3375 }
3376
3377 q = cnt / num;
3378 *quote = q ? q : 1;
3379 BT_DBG("chan %p quote %d", chan, *quote);
3380 return chan;
3381}
3382
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003383static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3384{
3385 struct hci_conn_hash *h = &hdev->conn_hash;
3386 struct hci_conn *conn;
3387 int num = 0;
3388
3389 BT_DBG("%s", hdev->name);
3390
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003391 rcu_read_lock();
3392
3393 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003394 struct hci_chan *chan;
3395
3396 if (conn->type != type)
3397 continue;
3398
3399 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3400 continue;
3401
3402 num++;
3403
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003404 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003405 struct sk_buff *skb;
3406
3407 if (chan->sent) {
3408 chan->sent = 0;
3409 continue;
3410 }
3411
3412 if (skb_queue_empty(&chan->data_q))
3413 continue;
3414
3415 skb = skb_peek(&chan->data_q);
3416 if (skb->priority >= HCI_PRIO_MAX - 1)
3417 continue;
3418
3419 skb->priority = HCI_PRIO_MAX - 1;
3420
3421 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003422 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003423 }
3424
3425 if (hci_conn_num(hdev, type) == num)
3426 break;
3427 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003428
3429 rcu_read_unlock();
3430
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003431}
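
/* A channel that transmitted during the last round has its counter reset
 * and is otherwise left alone; a channel that still has data queued but
 * got no quota is promoted to HCI_PRIO_MAX - 1, so low-priority traffic
 * cannot be starved indefinitely by higher-priority channels.
 */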
3432
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003433static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3434{
3435 /* Calculate count of blocks used by this packet */
3436 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3437}
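
/* For example, with hdev->block_len == 16 a 100-byte ACL skb (96 bytes of
 * payload after the 4-byte ACL header) occupies DIV_ROUND_UP(96, 16) == 6
 * controller buffer blocks.
 */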
3438
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003439static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441 if (!test_bit(HCI_RAW, &hdev->flags)) {
3442 /* ACL tx timeout must be longer than maximum
3443 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003444 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003445 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003446 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003448}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003450static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003451{
3452 unsigned int cnt = hdev->acl_cnt;
3453 struct hci_chan *chan;
3454 struct sk_buff *skb;
3455 int quote;
3456
3457 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003458
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003459 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003460 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003461 u32 priority = (skb_peek(&chan->data_q))->priority;
3462 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003463 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003464 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003465
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003466 /* Stop if priority has changed */
3467 if (skb->priority < priority)
3468 break;
3469
3470 skb = skb_dequeue(&chan->data_q);
3471
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003472 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003473 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003474
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003475 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 hdev->acl_last_tx = jiffies;
3477
3478 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003479 chan->sent++;
3480 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481 }
3482 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003483
3484 if (cnt != hdev->acl_cnt)
3485 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486}
3487
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003488static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003489{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003490 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003491 struct hci_chan *chan;
3492 struct sk_buff *skb;
3493 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003494 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003495
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003496 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003497
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003498 BT_DBG("%s", hdev->name);
3499
3500 if (hdev->dev_type == HCI_AMP)
3501 type = AMP_LINK;
3502 else
3503 type = ACL_LINK;
3504
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003505 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003506 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003507 u32 priority = (skb_peek(&chan->data_q))->priority;
3508 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3509 int blocks;
3510
3511 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003512 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003513
3514 /* Stop if priority has changed */
3515 if (skb->priority < priority)
3516 break;
3517
3518 skb = skb_dequeue(&chan->data_q);
3519
3520 blocks = __get_blocks(hdev, skb);
3521 if (blocks > hdev->block_cnt)
3522 return;
3523
3524 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003525 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003526
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003527 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003528 hdev->acl_last_tx = jiffies;
3529
3530 hdev->block_cnt -= blocks;
3531 quote -= blocks;
3532
3533 chan->sent += blocks;
3534 chan->conn->sent += blocks;
3535 }
3536 }
3537
3538 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003539 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003540}
3541
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003542static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003543{
3544 BT_DBG("%s", hdev->name);
3545
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003546 /* No ACL link over BR/EDR controller */
3547 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3548 return;
3549
3550 /* No AMP link over AMP controller */
3551 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003552 return;
3553
3554 switch (hdev->flow_ctl_mode) {
3555 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3556 hci_sched_acl_pkt(hdev);
3557 break;
3558
3559 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3560 hci_sched_acl_blk(hdev);
3561 break;
3562 }
3563}
3564
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003566static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567{
3568 struct hci_conn *conn;
3569 struct sk_buff *skb;
3570 int quote;
3571
3572 BT_DBG("%s", hdev->name);
3573
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003574 if (!hci_conn_num(hdev, SCO_LINK))
3575 return;
3576
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3578 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3579 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003580 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581
3582 conn->sent++;
3583 if (conn->sent == ~0)
3584 conn->sent = 0;
3585 }
3586 }
3587}
3588
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003589static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003590{
3591 struct hci_conn *conn;
3592 struct sk_buff *skb;
3593 int quote;
3594
3595 BT_DBG("%s", hdev->name);
3596
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003597 if (!hci_conn_num(hdev, ESCO_LINK))
3598 return;
3599
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003600 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3601 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003602 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3603 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003604 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003605
3606 conn->sent++;
3607 if (conn->sent == ~0)
3608 conn->sent = 0;
3609 }
3610 }
3611}
3612
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003613static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003614{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003615 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003616 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003617 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003618
3619 BT_DBG("%s", hdev->name);
3620
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003621 if (!hci_conn_num(hdev, LE_LINK))
3622 return;
3623
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003624 if (!test_bit(HCI_RAW, &hdev->flags)) {
3625 /* LE tx timeout must be longer than maximum
3626 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003627 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003628 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003629 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003630 }
3631
3632 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003633 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003634 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003635 u32 priority = (skb_peek(&chan->data_q))->priority;
3636 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003637 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003638 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003639
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003640 /* Stop if priority has changed */
3641 if (skb->priority < priority)
3642 break;
3643
3644 skb = skb_dequeue(&chan->data_q);
3645
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003646 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003647 hdev->le_last_tx = jiffies;
3648
3649 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003650 chan->sent++;
3651 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003652 }
3653 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003654
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003655 if (hdev->le_pkts)
3656 hdev->le_cnt = cnt;
3657 else
3658 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003659
3660 if (cnt != tmp)
3661 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003662}
3663
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003664static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003666 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667 struct sk_buff *skb;
3668
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003669 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003670 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671
Marcel Holtmann52de5992013-09-03 18:08:38 -07003672 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3673 /* Schedule queues and send stuff to HCI driver */
3674 hci_sched_acl(hdev);
3675 hci_sched_sco(hdev);
3676 hci_sched_esco(hdev);
3677 hci_sched_le(hdev);
3678 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003679
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 /* Send next queued raw (unknown type) packet */
3681 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003682 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683}
3684
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003685/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686
3687/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003688static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689{
3690 struct hci_acl_hdr *hdr = (void *) skb->data;
3691 struct hci_conn *conn;
3692 __u16 handle, flags;
3693
3694 skb_pull(skb, HCI_ACL_HDR_SIZE);
3695
3696 handle = __le16_to_cpu(hdr->handle);
3697 flags = hci_flags(handle);
3698 handle = hci_handle(handle);
3699
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003700 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003701 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702
3703 hdev->stat.acl_rx++;
3704
3705 hci_dev_lock(hdev);
3706 conn = hci_conn_hash_lookup_handle(hdev, handle);
3707 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003708
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003710 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003711
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003713 l2cap_recv_acldata(conn, skb, flags);
3714 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003716 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003717 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718 }
3719
3720 kfree_skb(skb);
3721}
3722
3723/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003724static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725{
3726 struct hci_sco_hdr *hdr = (void *) skb->data;
3727 struct hci_conn *conn;
3728 __u16 handle;
3729
3730 skb_pull(skb, HCI_SCO_HDR_SIZE);
3731
3732 handle = __le16_to_cpu(hdr->handle);
3733
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003734 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735
3736 hdev->stat.sco_rx++;
3737
3738 hci_dev_lock(hdev);
3739 conn = hci_conn_hash_lookup_handle(hdev, handle);
3740 hci_dev_unlock(hdev);
3741
3742 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003744 sco_recv_scodata(conn, skb);
3745 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003747 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003748 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749 }
3750
3751 kfree_skb(skb);
3752}
3753
Johan Hedberg9238f362013-03-05 20:37:48 +02003754static bool hci_req_is_complete(struct hci_dev *hdev)
3755{
3756 struct sk_buff *skb;
3757
3758 skb = skb_peek(&hdev->cmd_q);
3759 if (!skb)
3760 return true;
3761
3762 return bt_cb(skb)->req.start;
3763}
3764
Johan Hedberg42c6b122013-03-05 20:37:49 +02003765static void hci_resend_last(struct hci_dev *hdev)
3766{
3767 struct hci_command_hdr *sent;
3768 struct sk_buff *skb;
3769 u16 opcode;
3770
3771 if (!hdev->sent_cmd)
3772 return;
3773
3774 sent = (void *) hdev->sent_cmd->data;
3775 opcode = __le16_to_cpu(sent->opcode);
3776 if (opcode == HCI_OP_RESET)
3777 return;
3778
3779 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3780 if (!skb)
3781 return;
3782
3783 skb_queue_head(&hdev->cmd_q, skb);
3784 queue_work(hdev->workqueue, &hdev->cmd_work);
3785}
3786
Johan Hedberg9238f362013-03-05 20:37:48 +02003787void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3788{
3789 hci_req_complete_t req_complete = NULL;
3790 struct sk_buff *skb;
3791 unsigned long flags;
3792
3793 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3794
Johan Hedberg42c6b122013-03-05 20:37:49 +02003795 /* If the completed command doesn't match the last one that was
3796 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003797 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003798 if (!hci_sent_cmd_data(hdev, opcode)) {
3799 /* Some CSR based controllers generate a spontaneous
3800 * reset complete event during init and any pending
3801 * command will never be completed. In such a case we
3802 * need to resend whatever was the last sent
3803 * command.
3804 */
3805 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3806 hci_resend_last(hdev);
3807
Johan Hedberg9238f362013-03-05 20:37:48 +02003808 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003809 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003810
3811 /* If the command succeeded and there's still more commands in
3812 * this request the request is not yet complete.
3813 */
3814 if (!status && !hci_req_is_complete(hdev))
3815 return;
3816
3817 /* If this was the last command in a request the complete
3818 * callback would be found in hdev->sent_cmd instead of the
3819 * command queue (hdev->cmd_q).
3820 */
3821 if (hdev->sent_cmd) {
3822 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003823
3824 if (req_complete) {
3825 /* We must set the complete callback to NULL to
3826 * avoid calling the callback more than once if
3827 * this function gets called again.
3828 */
3829 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3830
Johan Hedberg9238f362013-03-05 20:37:48 +02003831 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003832 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003833 }
3834
3835 /* Remove all pending commands belonging to this request */
3836 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3837 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3838 if (bt_cb(skb)->req.start) {
3839 __skb_queue_head(&hdev->cmd_q, skb);
3840 break;
3841 }
3842
3843 req_complete = bt_cb(skb)->req.complete;
3844 kfree_skb(skb);
3845 }
3846 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3847
3848call_complete:
3849 if (req_complete)
3850 req_complete(hdev, status);
3851}
3852
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003853static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003855 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003856 struct sk_buff *skb;
3857
3858 BT_DBG("%s", hdev->name);
3859
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003861 /* Send copy to monitor */
3862 hci_send_to_monitor(hdev, skb);
3863
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864 if (atomic_read(&hdev->promisc)) {
3865 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003866 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867 }
3868
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003869 if (test_bit(HCI_RAW, &hdev->flags) ||
3870 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 kfree_skb(skb);
3872 continue;
3873 }
3874
3875 if (test_bit(HCI_INIT, &hdev->flags)) {
3876			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003877 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 case HCI_ACLDATA_PKT:
3879 case HCI_SCODATA_PKT:
3880 kfree_skb(skb);
3881 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003882 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 }
3884
3885 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003886 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003888 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 hci_event_packet(hdev, skb);
3890 break;
3891
3892 case HCI_ACLDATA_PKT:
3893 BT_DBG("%s ACL data packet", hdev->name);
3894 hci_acldata_packet(hdev, skb);
3895 break;
3896
3897 case HCI_SCODATA_PKT:
3898 BT_DBG("%s SCO data packet", hdev->name);
3899 hci_scodata_packet(hdev, skb);
3900 break;
3901
3902 default:
3903 kfree_skb(skb);
3904 break;
3905 }
3906 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907}
3908
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003909static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003911 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 struct sk_buff *skb;
3913
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003914 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3915 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003918 if (atomic_read(&hdev->cmd_cnt)) {
3919 skb = skb_dequeue(&hdev->cmd_q);
3920 if (!skb)
3921 return;
3922
Wei Yongjun7585b972009-02-25 18:29:52 +08003923 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003925 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003926 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003928 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003929 if (test_bit(HCI_RESET, &hdev->flags))
3930 del_timer(&hdev->cmd_timer);
3931 else
3932 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003933 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 } else {
3935 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003936 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937 }
3938 }
3939}