blob: e8058c3c957675c027e5e69151726e1b74e8545c [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070032#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
Marcel Holtmannb78752c2010-08-08 23:06:53 -040037static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020038static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020039static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070040
Linus Torvalds1da177e2005-04-16 15:20:36 -070041/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
Sasha Levin3df92b32012-05-27 22:36:56 +020049/* HCI ID Numbering */
50static DEFINE_IDA(hci_index_ida);
51
Linus Torvalds1da177e2005-04-16 15:20:36 -070052/* ---- HCI notifications ---- */
53
Marcel Holtmann65164552005-10-28 19:20:48 +020054static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070055{
Marcel Holtmann040030e2012-02-20 14:50:37 +010056 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070057}
58
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070059/* ---- HCI debugfs entries ---- */
60
Marcel Holtmann70afe0b2013-10-17 17:24:14 -070061static int blacklist_show(struct seq_file *f, void *p)
62{
63 struct hci_dev *hdev = f->private;
64 struct bdaddr_list *b;
65
66 hci_dev_lock(hdev);
67 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -070068 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -070069 hci_dev_unlock(hdev);
70
71 return 0;
72}
73
74static int blacklist_open(struct inode *inode, struct file *file)
75{
76 return single_open(file, blacklist_show, inode->i_private);
77}
78
79static const struct file_operations blacklist_fops = {
80 .open = blacklist_open,
81 .read = seq_read,
82 .llseek = seq_lseek,
83 .release = single_release,
84};
85
Marcel Holtmann47219832013-10-17 17:24:15 -070086static int uuids_show(struct seq_file *f, void *p)
87{
88 struct hci_dev *hdev = f->private;
89 struct bt_uuid *uuid;
90
91 hci_dev_lock(hdev);
92 list_for_each_entry(uuid, &hdev->uuids, list) {
93 u32 data0, data5;
94 u16 data1, data2, data3, data4;
95
96 data5 = get_unaligned_le32(uuid);
97 data4 = get_unaligned_le16(uuid + 4);
98 data3 = get_unaligned_le16(uuid + 6);
99 data2 = get_unaligned_le16(uuid + 8);
100 data1 = get_unaligned_le16(uuid + 10);
101 data0 = get_unaligned_le32(uuid + 12);
102
103 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
104 data0, data1, data2, data3, data4, data5);
105 }
106 hci_dev_unlock(hdev);
107
108 return 0;
109}
110
111static int uuids_open(struct inode *inode, struct file *file)
112{
113 return single_open(file, uuids_show, inode->i_private);
114}
115
116static const struct file_operations uuids_fops = {
117 .open = uuids_open,
118 .read = seq_read,
119 .llseek = seq_lseek,
120 .release = single_release,
121};
122
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700123static int inquiry_cache_show(struct seq_file *f, void *p)
124{
125 struct hci_dev *hdev = f->private;
126 struct discovery_state *cache = &hdev->discovery;
127 struct inquiry_entry *e;
128
129 hci_dev_lock(hdev);
130
131 list_for_each_entry(e, &cache->all, all) {
132 struct inquiry_data *data = &e->data;
133 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
134 &data->bdaddr,
135 data->pscan_rep_mode, data->pscan_period_mode,
136 data->pscan_mode, data->dev_class[2],
137 data->dev_class[1], data->dev_class[0],
138 __le16_to_cpu(data->clock_offset),
139 data->rssi, data->ssp_mode, e->timestamp);
140 }
141
142 hci_dev_unlock(hdev);
143
144 return 0;
145}
146
147static int inquiry_cache_open(struct inode *inode, struct file *file)
148{
149 return single_open(file, inquiry_cache_show, inode->i_private);
150}
151
152static const struct file_operations inquiry_cache_fops = {
153 .open = inquiry_cache_open,
154 .read = seq_read,
155 .llseek = seq_lseek,
156 .release = single_release,
157};
158
Marcel Holtmann041000b2013-10-17 12:02:31 -0700159static int voice_setting_get(void *data, u64 *val)
160{
161 struct hci_dev *hdev = data;
162
163 hci_dev_lock(hdev);
164 *val = hdev->voice_setting;
165 hci_dev_unlock(hdev);
166
167 return 0;
168}
169
170DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
171 NULL, "0x%4.4llx\n");
172
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700173static int auto_accept_delay_set(void *data, u64 val)
174{
175 struct hci_dev *hdev = data;
176
177 hci_dev_lock(hdev);
178 hdev->auto_accept_delay = val;
179 hci_dev_unlock(hdev);
180
181 return 0;
182}
183
184static int auto_accept_delay_get(void *data, u64 *val)
185{
186 struct hci_dev *hdev = data;
187
188 hci_dev_lock(hdev);
189 *val = hdev->auto_accept_delay;
190 hci_dev_unlock(hdev);
191
192 return 0;
193}
194
195DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
196 auto_accept_delay_set, "%llu\n");
197
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700198static int idle_timeout_set(void *data, u64 val)
199{
200 struct hci_dev *hdev = data;
201
202 if (val != 0 && (val < 500 || val > 3600000))
203 return -EINVAL;
204
205 hci_dev_lock(hdev);
206 hdev->idle_timeout= val;
207 hci_dev_unlock(hdev);
208
209 return 0;
210}
211
212static int idle_timeout_get(void *data, u64 *val)
213{
214 struct hci_dev *hdev = data;
215
216 hci_dev_lock(hdev);
217 *val = hdev->idle_timeout;
218 hci_dev_unlock(hdev);
219
220 return 0;
221}
222
223DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
224 idle_timeout_set, "%llu\n");
225
226static int sniff_min_interval_set(void *data, u64 val)
227{
228 struct hci_dev *hdev = data;
229
230 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
231 return -EINVAL;
232
233 hci_dev_lock(hdev);
234 hdev->sniff_min_interval= val;
235 hci_dev_unlock(hdev);
236
237 return 0;
238}
239
240static int sniff_min_interval_get(void *data, u64 *val)
241{
242 struct hci_dev *hdev = data;
243
244 hci_dev_lock(hdev);
245 *val = hdev->sniff_min_interval;
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
252 sniff_min_interval_set, "%llu\n");
253
254static int sniff_max_interval_set(void *data, u64 val)
255{
256 struct hci_dev *hdev = data;
257
258 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
259 return -EINVAL;
260
261 hci_dev_lock(hdev);
262 hdev->sniff_max_interval= val;
263 hci_dev_unlock(hdev);
264
265 return 0;
266}
267
268static int sniff_max_interval_get(void *data, u64 *val)
269{
270 struct hci_dev *hdev = data;
271
272 hci_dev_lock(hdev);
273 *val = hdev->sniff_max_interval;
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
280 sniff_max_interval_set, "%llu\n");
281
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700282static int static_address_show(struct seq_file *f, void *p)
283{
284 struct hci_dev *hdev = f->private;
285
286 hci_dev_lock(hdev);
287 seq_printf(f, "%pMR\n", &hdev->static_addr);
288 hci_dev_unlock(hdev);
289
290 return 0;
291}
292
293static int static_address_open(struct inode *inode, struct file *file)
294{
295 return single_open(file, static_address_show, inode->i_private);
296}
297
298static const struct file_operations static_address_fops = {
299 .open = static_address_open,
300 .read = seq_read,
301 .llseek = seq_lseek,
302 .release = single_release,
303};
304
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305/* ---- HCI requests ---- */
306
Johan Hedberg42c6b122013-03-05 20:37:49 +0200307static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200309 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310
311 if (hdev->req_status == HCI_REQ_PEND) {
312 hdev->req_result = result;
313 hdev->req_status = HCI_REQ_DONE;
314 wake_up_interruptible(&hdev->req_wait_q);
315 }
316}
317
318static void hci_req_cancel(struct hci_dev *hdev, int err)
319{
320 BT_DBG("%s err 0x%2.2x", hdev->name, err);
321
322 if (hdev->req_status == HCI_REQ_PEND) {
323 hdev->req_result = err;
324 hdev->req_status = HCI_REQ_CANCELED;
325 wake_up_interruptible(&hdev->req_wait_q);
326 }
327}
328
Fengguang Wu77a63e02013-04-20 16:24:31 +0300329static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
330 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300331{
332 struct hci_ev_cmd_complete *ev;
333 struct hci_event_hdr *hdr;
334 struct sk_buff *skb;
335
336 hci_dev_lock(hdev);
337
338 skb = hdev->recv_evt;
339 hdev->recv_evt = NULL;
340
341 hci_dev_unlock(hdev);
342
343 if (!skb)
344 return ERR_PTR(-ENODATA);
345
346 if (skb->len < sizeof(*hdr)) {
347 BT_ERR("Too short HCI event");
348 goto failed;
349 }
350
351 hdr = (void *) skb->data;
352 skb_pull(skb, HCI_EVENT_HDR_SIZE);
353
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300354 if (event) {
355 if (hdr->evt != event)
356 goto failed;
357 return skb;
358 }
359
Johan Hedberg75e84b72013-04-02 13:35:04 +0300360 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
361 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
362 goto failed;
363 }
364
365 if (skb->len < sizeof(*ev)) {
366 BT_ERR("Too short cmd_complete event");
367 goto failed;
368 }
369
370 ev = (void *) skb->data;
371 skb_pull(skb, sizeof(*ev));
372
373 if (opcode == __le16_to_cpu(ev->opcode))
374 return skb;
375
376 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
377 __le16_to_cpu(ev->opcode));
378
379failed:
380 kfree_skb(skb);
381 return ERR_PTR(-ENODATA);
382}
383
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300384struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300385 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300386{
387 DECLARE_WAITQUEUE(wait, current);
388 struct hci_request req;
389 int err = 0;
390
391 BT_DBG("%s", hdev->name);
392
393 hci_req_init(&req, hdev);
394
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300395 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300396
397 hdev->req_status = HCI_REQ_PEND;
398
399 err = hci_req_run(&req, hci_req_sync_complete);
400 if (err < 0)
401 return ERR_PTR(err);
402
403 add_wait_queue(&hdev->req_wait_q, &wait);
404 set_current_state(TASK_INTERRUPTIBLE);
405
406 schedule_timeout(timeout);
407
408 remove_wait_queue(&hdev->req_wait_q, &wait);
409
410 if (signal_pending(current))
411 return ERR_PTR(-EINTR);
412
413 switch (hdev->req_status) {
414 case HCI_REQ_DONE:
415 err = -bt_to_errno(hdev->req_result);
416 break;
417
418 case HCI_REQ_CANCELED:
419 err = -hdev->req_result;
420 break;
421
422 default:
423 err = -ETIMEDOUT;
424 break;
425 }
426
427 hdev->req_status = hdev->req_result = 0;
428
429 BT_DBG("%s end: err %d", hdev->name, err);
430
431 if (err < 0)
432 return ERR_PTR(err);
433
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300434 return hci_get_cmd_complete(hdev, opcode, event);
435}
436EXPORT_SYMBOL(__hci_cmd_sync_ev);
437
438struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300439 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300440{
441 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300442}
443EXPORT_SYMBOL(__hci_cmd_sync);
444
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200446static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200447 void (*func)(struct hci_request *req,
448 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200449 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200451 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452 DECLARE_WAITQUEUE(wait, current);
453 int err = 0;
454
455 BT_DBG("%s start", hdev->name);
456
Johan Hedberg42c6b122013-03-05 20:37:49 +0200457 hci_req_init(&req, hdev);
458
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459 hdev->req_status = HCI_REQ_PEND;
460
Johan Hedberg42c6b122013-03-05 20:37:49 +0200461 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200462
Johan Hedberg42c6b122013-03-05 20:37:49 +0200463 err = hci_req_run(&req, hci_req_sync_complete);
464 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200465 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300466
467 /* ENODATA means the HCI request command queue is empty.
468 * This can happen when a request with conditionals doesn't
469 * trigger any commands to be sent. This is normal behavior
470 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200471 */
Andre Guedes920c8302013-03-08 11:20:15 -0300472 if (err == -ENODATA)
473 return 0;
474
475 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200476 }
477
Andre Guedesbc4445c2013-03-08 11:20:13 -0300478 add_wait_queue(&hdev->req_wait_q, &wait);
479 set_current_state(TASK_INTERRUPTIBLE);
480
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481 schedule_timeout(timeout);
482
483 remove_wait_queue(&hdev->req_wait_q, &wait);
484
485 if (signal_pending(current))
486 return -EINTR;
487
488 switch (hdev->req_status) {
489 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700490 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 break;
492
493 case HCI_REQ_CANCELED:
494 err = -hdev->req_result;
495 break;
496
497 default:
498 err = -ETIMEDOUT;
499 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700500 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501
Johan Hedberga5040ef2011-01-10 13:28:59 +0200502 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503
504 BT_DBG("%s end: err %d", hdev->name, err);
505
506 return err;
507}
508
Johan Hedberg01178cd2013-03-05 20:37:41 +0200509static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200510 void (*req)(struct hci_request *req,
511 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200512 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513{
514 int ret;
515
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200516 if (!test_bit(HCI_UP, &hdev->flags))
517 return -ENETDOWN;
518
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519 /* Serialize all requests */
520 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200521 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 hci_req_unlock(hdev);
523
524 return ret;
525}
526
Johan Hedberg42c6b122013-03-05 20:37:49 +0200527static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200529 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530
531 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200532 set_bit(HCI_RESET, &req->hdev->flags);
533 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534}
535
Johan Hedberg42c6b122013-03-05 20:37:49 +0200536static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200538 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200539
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200541 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200543 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200544 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200545
546 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200547 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548}
549
Johan Hedberg42c6b122013-03-05 20:37:49 +0200550static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200551{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200552 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200553
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200554 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200555 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300556
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700557 /* Read Local Supported Commands */
558 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
559
560 /* Read Local Supported Features */
561 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
562
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300563 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200564 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300565
566 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200567 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700568
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700569 /* Read Flow Control Mode */
570 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
571
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700572 /* Read Location Data */
573 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200574}
575
Johan Hedberg42c6b122013-03-05 20:37:49 +0200576static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200577{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200578 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200579
580 BT_DBG("%s %ld", hdev->name, opt);
581
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300582 /* Reset */
583 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200584 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300585
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200586 switch (hdev->dev_type) {
587 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200588 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200589 break;
590
591 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200592 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200593 break;
594
595 default:
596 BT_ERR("Unknown device type %d", hdev->dev_type);
597 break;
598 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200599}
600
Johan Hedberg42c6b122013-03-05 20:37:49 +0200601static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200602{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700603 struct hci_dev *hdev = req->hdev;
604
Johan Hedberg2177bab2013-03-05 20:37:43 +0200605 __le16 param;
606 __u8 flt_type;
607
608 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200609 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200610
611 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200612 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200613
614 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200615 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200616
617 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200618 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200619
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700620 /* Read Number of Supported IAC */
621 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
622
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700623 /* Read Current IAC LAP */
624 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
625
Johan Hedberg2177bab2013-03-05 20:37:43 +0200626 /* Clear Event Filters */
627 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200628 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200629
630 /* Connection accept timeout ~20 secs */
631 param = __constant_cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200632 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200633
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700634 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
635 * but it does not support page scan related HCI commands.
636 */
637 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -0500638 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
639 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
640 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200641}
642
Johan Hedberg42c6b122013-03-05 20:37:49 +0200643static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200644{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300645 struct hci_dev *hdev = req->hdev;
646
Johan Hedberg2177bab2013-03-05 20:37:43 +0200647 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200648 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200649
650 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200651 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200652
653 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200654 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200655
656 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200657 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200658
659 /* Read LE Supported States */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200660 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300661
662 /* LE-only controllers have LE implicitly enabled */
663 if (!lmp_bredr_capable(hdev))
664 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200665}
666
667static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
668{
669 if (lmp_ext_inq_capable(hdev))
670 return 0x02;
671
672 if (lmp_inq_rssi_capable(hdev))
673 return 0x01;
674
675 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
676 hdev->lmp_subver == 0x0757)
677 return 0x01;
678
679 if (hdev->manufacturer == 15) {
680 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
681 return 0x01;
682 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
683 return 0x01;
684 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
685 return 0x01;
686 }
687
688 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
689 hdev->lmp_subver == 0x1805)
690 return 0x01;
691
692 return 0x00;
693}
694
Johan Hedberg42c6b122013-03-05 20:37:49 +0200695static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200696{
697 u8 mode;
698
Johan Hedberg42c6b122013-03-05 20:37:49 +0200699 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200700
Johan Hedberg42c6b122013-03-05 20:37:49 +0200701 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200702}
703
Johan Hedberg42c6b122013-03-05 20:37:49 +0200704static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200705{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200706 struct hci_dev *hdev = req->hdev;
707
Johan Hedberg2177bab2013-03-05 20:37:43 +0200708 /* The second byte is 0xff instead of 0x9f (two reserved bits
709 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
710 * command otherwise.
711 */
712 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
713
714 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
715 * any event mask for pre 1.2 devices.
716 */
717 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
718 return;
719
720 if (lmp_bredr_capable(hdev)) {
721 events[4] |= 0x01; /* Flow Specification Complete */
722 events[4] |= 0x02; /* Inquiry Result with RSSI */
723 events[4] |= 0x04; /* Read Remote Extended Features Complete */
724 events[5] |= 0x08; /* Synchronous Connection Complete */
725 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700726 } else {
727 /* Use a different default for LE-only devices */
728 memset(events, 0, sizeof(events));
729 events[0] |= 0x10; /* Disconnection Complete */
730 events[0] |= 0x80; /* Encryption Change */
731 events[1] |= 0x08; /* Read Remote Version Information Complete */
732 events[1] |= 0x20; /* Command Complete */
733 events[1] |= 0x40; /* Command Status */
734 events[1] |= 0x80; /* Hardware Error */
735 events[2] |= 0x04; /* Number of Completed Packets */
736 events[3] |= 0x02; /* Data Buffer Overflow */
737 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +0200738 }
739
740 if (lmp_inq_rssi_capable(hdev))
741 events[4] |= 0x02; /* Inquiry Result with RSSI */
742
743 if (lmp_sniffsubr_capable(hdev))
744 events[5] |= 0x20; /* Sniff Subrating */
745
746 if (lmp_pause_enc_capable(hdev))
747 events[5] |= 0x80; /* Encryption Key Refresh Complete */
748
749 if (lmp_ext_inq_capable(hdev))
750 events[5] |= 0x40; /* Extended Inquiry Result */
751
752 if (lmp_no_flush_capable(hdev))
753 events[7] |= 0x01; /* Enhanced Flush Complete */
754
755 if (lmp_lsto_capable(hdev))
756 events[6] |= 0x80; /* Link Supervision Timeout Changed */
757
758 if (lmp_ssp_capable(hdev)) {
759 events[6] |= 0x01; /* IO Capability Request */
760 events[6] |= 0x02; /* IO Capability Response */
761 events[6] |= 0x04; /* User Confirmation Request */
762 events[6] |= 0x08; /* User Passkey Request */
763 events[6] |= 0x10; /* Remote OOB Data Request */
764 events[6] |= 0x20; /* Simple Pairing Complete */
765 events[7] |= 0x04; /* User Passkey Notification */
766 events[7] |= 0x08; /* Keypress Notification */
767 events[7] |= 0x10; /* Remote Host Supported
768 * Features Notification
769 */
770 }
771
772 if (lmp_le_capable(hdev))
773 events[7] |= 0x20; /* LE Meta-Event */
774
Johan Hedberg42c6b122013-03-05 20:37:49 +0200775 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200776
777 if (lmp_le_capable(hdev)) {
778 memset(events, 0, sizeof(events));
779 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200780 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
781 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200782 }
783}
784
Johan Hedberg42c6b122013-03-05 20:37:49 +0200785static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200786{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200787 struct hci_dev *hdev = req->hdev;
788
Johan Hedberg2177bab2013-03-05 20:37:43 +0200789 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200790 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300791 else
792 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200793
794 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200795 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200796
Johan Hedberg42c6b122013-03-05 20:37:49 +0200797 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200798
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300799 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
800 * local supported commands HCI command.
801 */
802 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200803 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200804
805 if (lmp_ssp_capable(hdev)) {
806 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
807 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200808 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
809 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200810 } else {
811 struct hci_cp_write_eir cp;
812
813 memset(hdev->eir, 0, sizeof(hdev->eir));
814 memset(&cp, 0, sizeof(cp));
815
Johan Hedberg42c6b122013-03-05 20:37:49 +0200816 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200817 }
818 }
819
820 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200821 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200822
823 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200824 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200825
826 if (lmp_ext_feat_capable(hdev)) {
827 struct hci_cp_read_local_ext_features cp;
828
829 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200830 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
831 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200832 }
833
834 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
835 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200836 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
837 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200838 }
839}
840
Johan Hedberg42c6b122013-03-05 20:37:49 +0200841static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200842{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200843 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200844 struct hci_cp_write_def_link_policy cp;
845 u16 link_policy = 0;
846
847 if (lmp_rswitch_capable(hdev))
848 link_policy |= HCI_LP_RSWITCH;
849 if (lmp_hold_capable(hdev))
850 link_policy |= HCI_LP_HOLD;
851 if (lmp_sniff_capable(hdev))
852 link_policy |= HCI_LP_SNIFF;
853 if (lmp_park_capable(hdev))
854 link_policy |= HCI_LP_PARK;
855
856 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200857 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200858}
859
Johan Hedberg42c6b122013-03-05 20:37:49 +0200860static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200861{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200862 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200863 struct hci_cp_write_le_host_supported cp;
864
Johan Hedbergc73eee92013-04-19 18:35:21 +0300865 /* LE-only devices do not support explicit enablement */
866 if (!lmp_bredr_capable(hdev))
867 return;
868
Johan Hedberg2177bab2013-03-05 20:37:43 +0200869 memset(&cp, 0, sizeof(cp));
870
871 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
872 cp.le = 0x01;
873 cp.simul = lmp_le_br_capable(hdev);
874 }
875
876 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200877 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
878 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200879}
880
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300881static void hci_set_event_mask_page_2(struct hci_request *req)
882{
883 struct hci_dev *hdev = req->hdev;
884 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
885
886 /* If Connectionless Slave Broadcast master role is supported
887 * enable all necessary events for it.
888 */
889 if (hdev->features[2][0] & 0x01) {
890 events[1] |= 0x40; /* Triggered Clock Capture */
891 events[1] |= 0x80; /* Synchronization Train Complete */
892 events[2] |= 0x10; /* Slave Page Response Timeout */
893 events[2] |= 0x20; /* CSB Channel Map Change */
894 }
895
896 /* If Connectionless Slave Broadcast slave role is supported
897 * enable all necessary events for it.
898 */
899 if (hdev->features[2][0] & 0x02) {
900 events[2] |= 0x01; /* Synchronization Train Received */
901 events[2] |= 0x02; /* CSB Receive */
902 events[2] |= 0x04; /* CSB Timeout */
903 events[2] |= 0x08; /* Truncated Page Complete */
904 }
905
906 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
907}
908
/* Third stage of controller initialization: commands that depend on the
 * supported-commands bitmask and feature pages read in earlier stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wipe all stored link keys on the controller */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
947
/* Fourth stage of controller initialization: optional features gated on
 * the command/feature bits discovered during stage three.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
960
/* Run the staged controller initialization sequence and, on first-time
 * setup, create the debugfs entries for the device.
 *
 * Returns 0 on success or a negative errno from any failed stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries available on all controller types */
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);

	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	/* Sniff mode tunables (writable) */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}
1025
Johan Hedberg42c6b122013-03-05 20:37:49 +02001026static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027{
1028 __u8 scan = opt;
1029
Johan Hedberg42c6b122013-03-05 20:37:49 +02001030 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001031
1032 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001033 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034}
1035
Johan Hedberg42c6b122013-03-05 20:37:49 +02001036static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037{
1038 __u8 auth = opt;
1039
Johan Hedberg42c6b122013-03-05 20:37:49 +02001040 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041
1042 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001043 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044}
1045
Johan Hedberg42c6b122013-03-05 20:37:49 +02001046static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047{
1048 __u8 encrypt = opt;
1049
Johan Hedberg42c6b122013-03-05 20:37:49 +02001050 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001052 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001053 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054}
1055
Johan Hedberg42c6b122013-03-05 20:37:49 +02001056static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001057{
1058 __le16 policy = cpu_to_le16(opt);
1059
Johan Hedberg42c6b122013-03-05 20:37:49 +02001060 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001061
1062 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001063 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001064}
1065
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001066/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 * Device is held on return. */
1068struct hci_dev *hci_dev_get(int index)
1069{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001070 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071
1072 BT_DBG("%d", index);
1073
1074 if (index < 0)
1075 return NULL;
1076
1077 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001078 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 if (d->id == index) {
1080 hdev = hci_dev_hold(d);
1081 break;
1082 }
1083 }
1084 read_unlock(&hci_dev_list_lock);
1085 return hdev;
1086}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087
1088/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001089
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001090bool hci_discovery_active(struct hci_dev *hdev)
1091{
1092 struct discovery_state *discov = &hdev->discovery;
1093
Andre Guedes6fbe1952012-02-03 17:47:58 -03001094 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001095 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001096 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001097 return true;
1098
Andre Guedes6fbe1952012-02-03 17:47:58 -03001099 default:
1100 return false;
1101 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001102}
1103
Johan Hedbergff9ef572012-01-04 14:23:45 +02001104void hci_discovery_set_state(struct hci_dev *hdev, int state)
1105{
1106 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1107
1108 if (hdev->discovery.state == state)
1109 return;
1110
1111 switch (state) {
1112 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001113 if (hdev->discovery.state != DISCOVERY_STARTING)
1114 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001115 break;
1116 case DISCOVERY_STARTING:
1117 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001118 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001119 mgmt_discovering(hdev, 1);
1120 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001121 case DISCOVERY_RESOLVING:
1122 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001123 case DISCOVERY_STOPPING:
1124 break;
1125 }
1126
1127 hdev->discovery.state = state;
1128}
1129
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001130void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131{
Johan Hedberg30883512012-01-04 14:16:21 +02001132 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001133 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134
Johan Hedberg561aafb2012-01-04 13:31:59 +02001135 list_for_each_entry_safe(p, n, &cache->all, all) {
1136 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001137 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001139
1140 INIT_LIST_HEAD(&cache->unknown);
1141 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142}
1143
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001144struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1145 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146{
Johan Hedberg30883512012-01-04 14:16:21 +02001147 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 struct inquiry_entry *e;
1149
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001150 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
Johan Hedberg561aafb2012-01-04 13:31:59 +02001152 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001154 return e;
1155 }
1156
1157 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158}
1159
Johan Hedberg561aafb2012-01-04 13:31:59 +02001160struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001161 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001162{
Johan Hedberg30883512012-01-04 14:16:21 +02001163 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001164 struct inquiry_entry *e;
1165
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001166 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001167
1168 list_for_each_entry(e, &cache->unknown, list) {
1169 if (!bacmp(&e->data.bdaddr, bdaddr))
1170 return e;
1171 }
1172
1173 return NULL;
1174}
1175
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001176struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001177 bdaddr_t *bdaddr,
1178 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001179{
1180 struct discovery_state *cache = &hdev->discovery;
1181 struct inquiry_entry *e;
1182
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001183 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001184
1185 list_for_each_entry(e, &cache->resolve, list) {
1186 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1187 return e;
1188 if (!bacmp(&e->data.bdaddr, bdaddr))
1189 return e;
1190 }
1191
1192 return NULL;
1193}
1194
Johan Hedberga3d4e202012-01-09 00:53:02 +02001195void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001196 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001197{
1198 struct discovery_state *cache = &hdev->discovery;
1199 struct list_head *pos = &cache->resolve;
1200 struct inquiry_entry *p;
1201
1202 list_del(&ie->list);
1203
1204 list_for_each_entry(p, &cache->resolve, list) {
1205 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001206 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001207 break;
1208 pos = &p->list;
1209 }
1210
1211 list_add(&ie->list, pos);
1212}
1213
/* Add or refresh an inquiry cache entry for a discovered device.
 *
 * @data:       inquiry result data for the device
 * @name_known: whether the remote name is already known to the caller
 * @ssp:        optional out parameter; set to the device's SSP mode
 *
 * Returns true when the entry's name is known (no name resolution
 * needed), false when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A freshly discovered device invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP once any result showed it enabled */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Reorder the resolve list when the RSSI changed for an
		 * entry still waiting for name resolution.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN unless a resolution is
	 * already pending; remove it from the unknown/resolve list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1271
1272static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1273{
Johan Hedberg30883512012-01-04 14:16:21 +02001274 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 struct inquiry_info *info = (struct inquiry_info *) buf;
1276 struct inquiry_entry *e;
1277 int copied = 0;
1278
Johan Hedberg561aafb2012-01-04 13:31:59 +02001279 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001281
1282 if (copied >= num)
1283 break;
1284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 bacpy(&info->bdaddr, &data->bdaddr);
1286 info->pscan_rep_mode = data->pscan_rep_mode;
1287 info->pscan_period_mode = data->pscan_period_mode;
1288 info->pscan_mode = data->pscan_mode;
1289 memcpy(info->dev_class, data->dev_class, 3);
1290 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001291
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001293 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 }
1295
1296 BT_DBG("cache %p, copied %d", cache, copied);
1297 return copied;
1298}
1299
Johan Hedberg42c6b122013-03-05 20:37:49 +02001300static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301{
1302 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001303 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 struct hci_cp_inquiry cp;
1305
1306 BT_DBG("%s", hdev->name);
1307
1308 if (test_bit(HCI_INQUIRY, &hdev->flags))
1309 return;
1310
1311 /* Start Inquiry */
1312 memcpy(&cp.lap, &ir->lap, 3);
1313 cp.length = ir->length;
1314 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001315 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316}
1317
/* Action function for wait_on_bit(): sleep until woken, then report
 * whether a signal interrupted the wait (non-zero aborts the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1323
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324int hci_inquiry(void __user *arg)
1325{
1326 __u8 __user *ptr = arg;
1327 struct hci_inquiry_req ir;
1328 struct hci_dev *hdev;
1329 int err = 0, do_inquiry = 0, max_rsp;
1330 long timeo;
1331 __u8 *buf;
1332
1333 if (copy_from_user(&ir, ptr, sizeof(ir)))
1334 return -EFAULT;
1335
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001336 hdev = hci_dev_get(ir.dev_id);
1337 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 return -ENODEV;
1339
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001340 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1341 err = -EBUSY;
1342 goto done;
1343 }
1344
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001345 if (hdev->dev_type != HCI_BREDR) {
1346 err = -EOPNOTSUPP;
1347 goto done;
1348 }
1349
Johan Hedberg56f87902013-10-02 13:43:13 +03001350 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1351 err = -EOPNOTSUPP;
1352 goto done;
1353 }
1354
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001355 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001356 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001357 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001358 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 do_inquiry = 1;
1360 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001361 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362
Marcel Holtmann04837f62006-07-03 10:02:33 +02001363 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001364
1365 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001366 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1367 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001368 if (err < 0)
1369 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001370
1371 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1372 * cleared). If it is interrupted by a signal, return -EINTR.
1373 */
1374 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1375 TASK_INTERRUPTIBLE))
1376 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001377 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001379 /* for unlimited number of responses we will use buffer with
1380 * 255 entries
1381 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1383
1384 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1385 * copy it to the user space.
1386 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001387 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001388 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 err = -ENOMEM;
1390 goto done;
1391 }
1392
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001393 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001395 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
1397 BT_DBG("num_rsp %d", ir.num_rsp);
1398
1399 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1400 ptr += sizeof(ir);
1401 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001402 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001404 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 err = -EFAULT;
1406
1407 kfree(buf);
1408
1409done:
1410 hci_dev_put(hdev);
1411 return err;
1412}
1413
/* Power on and initialize an HCI device: open the transport, run the
 * vendor setup callback (first time only), execute the staged HCI init
 * sequence and announce the device as up.
 *
 * Returns 0 on success or a negative errno; on init failure the
 * transport is closed again and all queued work is flushed.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to open a device that is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport driver */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup, only during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip HCI init */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify the management interface once setup is complete
		 * and the device is under normal (non user-channel) control.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1515
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001516/* ---- HCI ioctl helpers ---- */
1517
1518int hci_dev_open(__u16 dev)
1519{
1520 struct hci_dev *hdev;
1521 int err;
1522
1523 hdev = hci_dev_get(dev);
1524 if (!hdev)
1525 return -ENODEV;
1526
Johan Hedberge1d08f42013-10-01 22:44:50 +03001527 /* We need to ensure that no other power on/off work is pending
1528 * before proceeding to call hci_dev_do_open. This is
1529 * particularly important if the setup procedure has not yet
1530 * completed.
1531 */
1532 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1533 cancel_delayed_work(&hdev->power_off);
1534
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001535 /* After this call it is guaranteed that the setup procedure
1536 * has finished. This means that error conditions like RFKILL
1537 * or no valid public or static random address apply.
1538 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001539 flush_workqueue(hdev->req_workqueue);
1540
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001541 err = hci_dev_do_open(hdev);
1542
1543 hci_dev_put(hdev);
1544
1545 return err;
1546}
1547
/* Power a controller down: cancel pending work, flush queues, reset the
 * controller (unless quirked out or in raw mode) and notify the stack.
 * Callers hold a reference via hci_dev_get(); this function drops one
 * reference at the end.  Returns 0.  If the device was not HCI_UP only
 * the command timer is stopped.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A still-pending delayed power-off would race with this close. */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device already down: just make sure the command
		 * timer is not left armed. */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable period and clear its flags. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		/* HCI_INIT lets the synchronous reset request go
		 * through even though HCI_UP is already cleared. */
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1645
1646int hci_dev_close(__u16 dev)
1647{
1648 struct hci_dev *hdev;
1649 int err;
1650
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001651 hdev = hci_dev_get(dev);
1652 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001654
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001655 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1656 err = -EBUSY;
1657 goto done;
1658 }
1659
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001660 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1661 cancel_delayed_work(&hdev->power_off);
1662
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001664
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001665done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 hci_dev_put(hdev);
1667 return err;
1668}
1669
/* HCIDEVRESET ioctl helper: issue HCI_Reset on an up-and-running
 * device without tearing the stack state down.  Drops the queues,
 * flushes the inquiry cache and connection hash, then re-arms the
 * flow-control counters.  Returns -ENETDOWN if the device is not up
 * and -EBUSY when it is owned by a user channel.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control accounting before the controller reset. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* In raw mode userspace drives the controller; skip the reset. */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1714
1715int hci_dev_reset_stat(__u16 dev)
1716{
1717 struct hci_dev *hdev;
1718 int ret = 0;
1719
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001720 hdev = hci_dev_get(dev);
1721 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 return -ENODEV;
1723
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001724 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1725 ret = -EBUSY;
1726 goto done;
1727 }
1728
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1730
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001731done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 return ret;
1734}
1735
/* Handle the legacy device-configuration ioctls (HCISETAUTH,
 * HCISETENCRYPT, HCISETSCAN, ...).  Copies a struct hci_dev_req from
 * user space and applies the requested setting, either by issuing a
 * synchronous HCI request or by updating hdev fields directly.  Only
 * available for BR/EDR capable controllers that are not bound to a
 * user channel.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values (mtu and packet
		 * count).  NOTE(review): the +0/+1 indexing depends on
		 * the host byte order laying the pair out this way —
		 * matches the historical hcitool ABI. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU. */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1826
/* HCIGETDEVLIST ioctl helper: copy the list of registered devices
 * (dev_id + flags pairs) to user space.  At most dev_num entries are
 * returned, bounded to two pages worth of struct hci_dev_req.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Enumerating the devices aborts a pending auto
		 * power-off — presumably so legacy userspace sees the
		 * device stay up; matches hci_get_dev_info(). */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Non-mgmt (legacy) userspace gets pairable devices. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink to the number of entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1873
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * requested device and copy it back to user space.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device aborts a pending auto power-off —
	 * matches the behavior of hci_get_dev_list(). */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Non-mgmt (legacy) userspace gets pairable devices. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; bits 4-5: device type. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report the LE buffer settings in
		 * the ACL fields, no SCO buffers. */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1922
1923/* ---- Interface to HCI drivers ---- */
1924
/* rfkill set_block callback: track the block state in HCI_RFKILLED and
 * power the controller down when it becomes blocked.  Unblocking only
 * clears the flag; it does not power the device back on.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	/* A user channel owns the device exclusively. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		/* During setup the rfkill state is re-checked later by
		 * hci_power_on(), so don't close the device here. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
1944
/* rfkill operations: only block/unblock handling is needed. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1948
/* Work handler that powers a controller on.  On open failure the error
 * is reported to mgmt.  On success, conditions that were deliberately
 * ignored during setup (rfkill, missing address) are re-checked and may
 * power the device straight back off; otherwise an auto power-off may
 * be scheduled.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Schedule the delayed power-off; it is cancelled
		 * elsewhere (e.g. hci_dev_close, hci_get_dev_list)
		 * when the device is claimed. */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1980
1981static void hci_power_off(struct work_struct *work)
1982{
Johan Hedberg32435532011-11-07 22:16:04 +02001983 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001984 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001985
1986 BT_DBG("%s", hdev->name);
1987
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001988 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001989}
1990
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001991static void hci_discov_off(struct work_struct *work)
1992{
1993 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001994
1995 hdev = container_of(work, struct hci_dev, discov_off.work);
1996
1997 BT_DBG("%s", hdev->name);
1998
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07001999 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002000}
2001
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002002int hci_uuids_clear(struct hci_dev *hdev)
2003{
Johan Hedberg48210022013-01-27 00:31:28 +02002004 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002005
Johan Hedberg48210022013-01-27 00:31:28 +02002006 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2007 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002008 kfree(uuid);
2009 }
2010
2011 return 0;
2012}
2013
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002014int hci_link_keys_clear(struct hci_dev *hdev)
2015{
2016 struct list_head *p, *n;
2017
2018 list_for_each_safe(p, n, &hdev->link_keys) {
2019 struct link_key *key;
2020
2021 key = list_entry(p, struct link_key, list);
2022
2023 list_del(p);
2024 kfree(key);
2025 }
2026
2027 return 0;
2028}
2029
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002030int hci_smp_ltks_clear(struct hci_dev *hdev)
2031{
2032 struct smp_ltk *k, *tmp;
2033
2034 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2035 list_del(&k->list);
2036 kfree(k);
2037 }
2038
2039 return 0;
2040}
2041
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002042struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2043{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002044 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002045
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002046 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002047 if (bacmp(bdaddr, &k->bdaddr) == 0)
2048 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002049
2050 return NULL;
2051}
2052
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302053static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002054 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002055{
2056 /* Legacy key */
2057 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302058 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002059
2060 /* Debug keys are insecure so don't store them persistently */
2061 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302062 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002063
2064 /* Changed combination key and there's no previous one */
2065 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302066 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002067
2068 /* Security mode 3 case */
2069 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302070 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002071
2072 /* Neither local nor remote side had no-bonding as requirement */
2073 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302074 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002075
2076 /* Local side had dedicated bonding as requirement */
2077 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302078 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002079
2080 /* Remote side had dedicated bonding as requirement */
2081 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302082 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002083
2084 /* If none of the above criteria match, then don't store the key
2085 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302086 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002087}
2088
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002089struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002090{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002091 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002092
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002093 list_for_each_entry(k, &hdev->long_term_keys, list) {
2094 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002095 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002096 continue;
2097
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002098 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002099 }
2100
2101 return NULL;
2102}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002103
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002104struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002105 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002106{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002107 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002108
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002109 list_for_each_entry(k, &hdev->long_term_keys, list)
2110 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002111 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002112 return k;
2113
2114 return NULL;
2115}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002116
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * @conn:    the connection the key was created on, may be NULL
 *           (security mode 3).
 * @new_key: non-zero when the controller just generated this key; in
 *           that case mgmt is notified and persistence is decided via
 *           hci_persistent_key().
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff stands for "no previous key" below and in
		 * hci_persistent_key(). */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the old key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops. */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2169
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK type keys are accepted; any other type is silently
 * ignored (returns 0).  When @new_key is set and the key is an LTK,
 * mgmt is notified.  Returns -ENOMEM if a new entry cannot be
 * allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are transient, so only announce LTKs to mgmt. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
2206
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002207int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2208{
2209 struct link_key *key;
2210
2211 key = hci_find_link_key(hdev, bdaddr);
2212 if (!key)
2213 return -ENOENT;
2214
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002215 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002216
2217 list_del(&key->list);
2218 kfree(key);
2219
2220 return 0;
2221}
2222
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002223int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2224{
2225 struct smp_ltk *k, *tmp;
2226
2227 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2228 if (bacmp(bdaddr, &k->bdaddr))
2229 continue;
2230
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002231 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002232
2233 list_del(&k->list);
2234 kfree(k);
2235 }
2236
2237 return 0;
2238}
2239
/* HCI command timer function */
/* Fires when the controller failed to complete the last sent command
 * in time.  Logs the stuck opcode (if a command is outstanding) and
 * re-arms the command credit so the command work queue can make
 * progress again instead of stalling forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow the next queued command to be sent. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2257
Szymon Janc2763eda2011-03-22 13:12:22 +01002258struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002259 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002260{
2261 struct oob_data *data;
2262
2263 list_for_each_entry(data, &hdev->remote_oob_data, list)
2264 if (bacmp(bdaddr, &data->bdaddr) == 0)
2265 return data;
2266
2267 return NULL;
2268}
2269
2270int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2271{
2272 struct oob_data *data;
2273
2274 data = hci_find_remote_oob_data(hdev, bdaddr);
2275 if (!data)
2276 return -ENOENT;
2277
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002278 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002279
2280 list_del(&data->list);
2281 kfree(data);
2282
2283 return 0;
2284}
2285
2286int hci_remote_oob_data_clear(struct hci_dev *hdev)
2287{
2288 struct oob_data *data, *n;
2289
2290 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2291 list_del(&data->list);
2292 kfree(data);
2293 }
2294
2295 return 0;
2296}
2297
2298int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002299 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002300{
2301 struct oob_data *data;
2302
2303 data = hci_find_remote_oob_data(hdev, bdaddr);
2304
2305 if (!data) {
2306 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2307 if (!data)
2308 return -ENOMEM;
2309
2310 bacpy(&data->bdaddr, bdaddr);
2311 list_add(&data->list, &hdev->remote_oob_data);
2312 }
2313
2314 memcpy(data->hash, hash, sizeof(data->hash));
2315 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2316
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002317 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002318
2319 return 0;
2320}
2321
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002322struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2323 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002324{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002325 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002326
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002327 list_for_each_entry(b, &hdev->blacklist, list) {
2328 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002329 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002330 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002331
2332 return NULL;
2333}
2334
2335int hci_blacklist_clear(struct hci_dev *hdev)
2336{
2337 struct list_head *p, *n;
2338
2339 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002340 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002341
2342 list_del(p);
2343 kfree(b);
2344 }
2345
2346 return 0;
2347}
2348
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002349int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002350{
2351 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002352
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002353 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002354 return -EBADF;
2355
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002356 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002357 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002358
2359 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002360 if (!entry)
2361 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002362
2363 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002364 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002365
2366 list_add(&entry->list, &hdev->blacklist);
2367
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002368 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002369}
2370
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002371int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002372{
2373 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002374
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002375 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002376 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002377
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002378 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002379 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002380 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002381
2382 list_del(&entry->list);
2383 kfree(entry);
2384
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002385 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002386}
2387
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002388static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002389{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002390 if (status) {
2391 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002392
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002393 hci_dev_lock(hdev);
2394 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2395 hci_dev_unlock(hdev);
2396 return;
2397 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002398}
2399
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002400static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002401{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002402 /* General inquiry access code (GIAC) */
2403 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2404 struct hci_request req;
2405 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002406 int err;
2407
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002408 if (status) {
2409 BT_ERR("Failed to disable LE scanning: status %d", status);
2410 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002411 }
2412
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002413 switch (hdev->discovery.type) {
2414 case DISCOV_TYPE_LE:
2415 hci_dev_lock(hdev);
2416 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2417 hci_dev_unlock(hdev);
2418 break;
2419
2420 case DISCOV_TYPE_INTERLEAVED:
2421 hci_req_init(&req, hdev);
2422
2423 memset(&cp, 0, sizeof(cp));
2424 memcpy(&cp.lap, lap, sizeof(cp.lap));
2425 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2426 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2427
2428 hci_dev_lock(hdev);
2429
2430 hci_inquiry_cache_flush(hdev);
2431
2432 err = hci_req_run(&req, inquiry_complete);
2433 if (err) {
2434 BT_ERR("Inquiry request failed: err %d", err);
2435 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2436 }
2437
2438 hci_dev_unlock(hdev);
2439 break;
2440 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002441}
2442
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002443static void le_scan_disable_work(struct work_struct *work)
2444{
2445 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002446 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002447 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002448 struct hci_request req;
2449 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002450
2451 BT_DBG("%s", hdev->name);
2452
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002453 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002454
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002455 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002456 cp.enable = LE_SCAN_DISABLE;
2457 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002458
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002459 err = hci_req_run(&req, le_scan_disable_work_complete);
2460 if (err)
2461 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002462}
2463
David Herrmann9be0dab2012-04-22 14:39:57 +02002464/* Alloc HCI device */
2465struct hci_dev *hci_alloc_dev(void)
2466{
2467 struct hci_dev *hdev;
2468
2469 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2470 if (!hdev)
2471 return NULL;
2472
David Herrmannb1b813d2012-04-22 14:39:58 +02002473 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2474 hdev->esco_type = (ESCO_HV1);
2475 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002476 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2477 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002478 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2479 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002480
David Herrmannb1b813d2012-04-22 14:39:58 +02002481 hdev->sniff_max_interval = 800;
2482 hdev->sniff_min_interval = 80;
2483
Marcel Holtmannbef64732013-10-11 08:23:19 -07002484 hdev->le_scan_interval = 0x0060;
2485 hdev->le_scan_window = 0x0030;
2486
David Herrmannb1b813d2012-04-22 14:39:58 +02002487 mutex_init(&hdev->lock);
2488 mutex_init(&hdev->req_lock);
2489
2490 INIT_LIST_HEAD(&hdev->mgmt_pending);
2491 INIT_LIST_HEAD(&hdev->blacklist);
2492 INIT_LIST_HEAD(&hdev->uuids);
2493 INIT_LIST_HEAD(&hdev->link_keys);
2494 INIT_LIST_HEAD(&hdev->long_term_keys);
2495 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002496 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002497
2498 INIT_WORK(&hdev->rx_work, hci_rx_work);
2499 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2500 INIT_WORK(&hdev->tx_work, hci_tx_work);
2501 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002502
David Herrmannb1b813d2012-04-22 14:39:58 +02002503 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2504 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2505 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2506
David Herrmannb1b813d2012-04-22 14:39:58 +02002507 skb_queue_head_init(&hdev->rx_q);
2508 skb_queue_head_init(&hdev->cmd_q);
2509 skb_queue_head_init(&hdev->raw_q);
2510
2511 init_waitqueue_head(&hdev->req_wait_q);
2512
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002513 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002514
David Herrmannb1b813d2012-04-22 14:39:58 +02002515 hci_init_sysfs(hdev);
2516 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002517
2518 return hdev;
2519}
2520EXPORT_SYMBOL(hci_alloc_dev);
2521
/* Free HCI device.
 *
 * Drops the embedded device reference; the hci_dev memory itself is
 * released by the device's release callback once the last reference
 * is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2529
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530/* Register HCI device */
2531int hci_register_dev(struct hci_dev *hdev)
2532{
David Herrmannb1b813d2012-04-22 14:39:58 +02002533 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534
David Herrmann010666a2012-01-07 15:47:07 +01002535 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 return -EINVAL;
2537
Mat Martineau08add512011-11-02 16:18:36 -07002538 /* Do not allow HCI_AMP devices to register at index 0,
2539 * so the index can be used as the AMP controller ID.
2540 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002541 switch (hdev->dev_type) {
2542 case HCI_BREDR:
2543 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2544 break;
2545 case HCI_AMP:
2546 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2547 break;
2548 default:
2549 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002551
Sasha Levin3df92b32012-05-27 22:36:56 +02002552 if (id < 0)
2553 return id;
2554
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 sprintf(hdev->name, "hci%d", id);
2556 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002557
2558 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2559
Kees Cookd8537542013-07-03 15:04:57 -07002560 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2561 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002562 if (!hdev->workqueue) {
2563 error = -ENOMEM;
2564 goto err;
2565 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002566
Kees Cookd8537542013-07-03 15:04:57 -07002567 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2568 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002569 if (!hdev->req_workqueue) {
2570 destroy_workqueue(hdev->workqueue);
2571 error = -ENOMEM;
2572 goto err;
2573 }
2574
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002575 if (!IS_ERR_OR_NULL(bt_debugfs))
2576 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2577
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002578 dev_set_name(&hdev->dev, "%s", hdev->name);
2579
2580 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002581 if (error < 0)
2582 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002584 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002585 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2586 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002587 if (hdev->rfkill) {
2588 if (rfkill_register(hdev->rfkill) < 0) {
2589 rfkill_destroy(hdev->rfkill);
2590 hdev->rfkill = NULL;
2591 }
2592 }
2593
Johan Hedberg5e130362013-09-13 08:58:17 +03002594 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2595 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2596
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002597 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002598 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002599
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002600 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002601 /* Assume BR/EDR support until proven otherwise (such as
2602 * through reading supported features during init.
2603 */
2604 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2605 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002606
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002607 write_lock(&hci_dev_list_lock);
2608 list_add(&hdev->list, &hci_dev_list);
2609 write_unlock(&hci_dev_list_lock);
2610
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002612 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613
Johan Hedberg19202572013-01-14 22:33:51 +02002614 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002615
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002617
David Herrmann33ca9542011-10-08 14:58:49 +02002618err_wqueue:
2619 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002620 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002621err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002622 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002623
David Herrmann33ca9542011-10-08 14:58:49 +02002624 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625}
2626EXPORT_SYMBOL(hci_register_dev);
2627
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() and runtime operation set
 * up: removes the device from the global list, closes it, frees
 * reassembly buffers, notifies mgmt, unregisters rfkill/sysfs/debugfs,
 * destroys the work queues, clears per-device key and blacklist state,
 * drops the registration reference and releases the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stop anyone else from starting new operations on this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* hdev->id is cleared on the way down; remember it for the ida */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() handles NULL slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all persistent per-device state under the dev lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2688
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend event; no device
 * state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2696
/* Resume HCI device.
 *
 * Only notifies registered listeners of the resume event; no device
 * state is changed here.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2704
Marcel Holtmann76bca882009-11-18 00:40:39 +01002705/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002706int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002707{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002708 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002709 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002710 kfree_skb(skb);
2711 return -ENXIO;
2712 }
2713
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002714 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002715 bt_cb(skb)->incoming = 1;
2716
2717 /* Time stamp */
2718 __net_timestamp(skb);
2719
Marcel Holtmann76bca882009-11-18 00:40:39 +01002720 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002721 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002722
Marcel Holtmann76bca882009-11-18 00:40:39 +01002723 return 0;
2724}
2725EXPORT_SYMBOL(hci_recv_frame);
2726
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302727static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002728 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302729{
2730 int len = 0;
2731 int hlen = 0;
2732 int remain = count;
2733 struct sk_buff *skb;
2734 struct bt_skb_cb *scb;
2735
2736 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002737 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302738 return -EILSEQ;
2739
2740 skb = hdev->reassembly[index];
2741
2742 if (!skb) {
2743 switch (type) {
2744 case HCI_ACLDATA_PKT:
2745 len = HCI_MAX_FRAME_SIZE;
2746 hlen = HCI_ACL_HDR_SIZE;
2747 break;
2748 case HCI_EVENT_PKT:
2749 len = HCI_MAX_EVENT_SIZE;
2750 hlen = HCI_EVENT_HDR_SIZE;
2751 break;
2752 case HCI_SCODATA_PKT:
2753 len = HCI_MAX_SCO_SIZE;
2754 hlen = HCI_SCO_HDR_SIZE;
2755 break;
2756 }
2757
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002758 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302759 if (!skb)
2760 return -ENOMEM;
2761
2762 scb = (void *) skb->cb;
2763 scb->expect = hlen;
2764 scb->pkt_type = type;
2765
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302766 hdev->reassembly[index] = skb;
2767 }
2768
2769 while (count) {
2770 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002771 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302772
2773 memcpy(skb_put(skb, len), data, len);
2774
2775 count -= len;
2776 data += len;
2777 scb->expect -= len;
2778 remain = count;
2779
2780 switch (type) {
2781 case HCI_EVENT_PKT:
2782 if (skb->len == HCI_EVENT_HDR_SIZE) {
2783 struct hci_event_hdr *h = hci_event_hdr(skb);
2784 scb->expect = h->plen;
2785
2786 if (skb_tailroom(skb) < scb->expect) {
2787 kfree_skb(skb);
2788 hdev->reassembly[index] = NULL;
2789 return -ENOMEM;
2790 }
2791 }
2792 break;
2793
2794 case HCI_ACLDATA_PKT:
2795 if (skb->len == HCI_ACL_HDR_SIZE) {
2796 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2797 scb->expect = __le16_to_cpu(h->dlen);
2798
2799 if (skb_tailroom(skb) < scb->expect) {
2800 kfree_skb(skb);
2801 hdev->reassembly[index] = NULL;
2802 return -ENOMEM;
2803 }
2804 }
2805 break;
2806
2807 case HCI_SCODATA_PKT:
2808 if (skb->len == HCI_SCO_HDR_SIZE) {
2809 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2810 scb->expect = h->dlen;
2811
2812 if (skb_tailroom(skb) < scb->expect) {
2813 kfree_skb(skb);
2814 hdev->reassembly[index] = NULL;
2815 return -ENOMEM;
2816 }
2817 }
2818 break;
2819 }
2820
2821 if (scb->expect == 0) {
2822 /* Complete frame */
2823
2824 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002825 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302826
2827 hdev->reassembly[index] = NULL;
2828 return remain;
2829 }
2830 }
2831
2832 return remain;
2833}
2834
Marcel Holtmannef222012007-07-11 06:42:04 +02002835int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2836{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302837 int rem = 0;
2838
Marcel Holtmannef222012007-07-11 06:42:04 +02002839 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2840 return -EILSEQ;
2841
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002842 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002843 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302844 if (rem < 0)
2845 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002846
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302847 data += (count - rem);
2848 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002849 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002850
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302851 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002852}
2853EXPORT_SYMBOL(hci_recv_fragment);
2854
Suraj Sumangala99811512010-07-14 13:02:19 +05302855#define STREAM_REASSEMBLY 0
2856
2857int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2858{
2859 int type;
2860 int rem = 0;
2861
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002862 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302863 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2864
2865 if (!skb) {
2866 struct { char type; } *pkt;
2867
2868 /* Start of the frame */
2869 pkt = data;
2870 type = pkt->type;
2871
2872 data++;
2873 count--;
2874 } else
2875 type = bt_cb(skb)->pkt_type;
2876
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002877 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002878 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302879 if (rem < 0)
2880 return rem;
2881
2882 data += (count - rem);
2883 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002884 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302885
2886 return rem;
2887}
2888EXPORT_SYMBOL(hci_recv_stream_fragment);
2889
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890/* ---- Interface to upper protocols ---- */
2891
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892int hci_register_cb(struct hci_cb *cb)
2893{
2894 BT_DBG("%p name %s", cb, cb->name);
2895
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002896 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002898 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899
2900 return 0;
2901}
2902EXPORT_SYMBOL(hci_register_cb);
2903
/* Unregister an upper-protocol callback set from the HCI core.
 *
 * Removes @cb from the global callback list under the list write
 * lock.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2915
Marcel Holtmann51086992013-10-10 14:54:19 -07002916static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002918 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002920 /* Time stamp */
2921 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002923 /* Send copy to monitor */
2924 hci_send_to_monitor(hdev, skb);
2925
2926 if (atomic_read(&hdev->promisc)) {
2927 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002928 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 }
2930
2931 /* Get rid of skb owner, prior to sending to the driver. */
2932 skb_orphan(skb);
2933
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07002934 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07002935 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936}
2937
Johan Hedberg3119ae92013-03-05 20:37:44 +02002938void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2939{
2940 skb_queue_head_init(&req->cmd_q);
2941 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002942 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002943}
2944
/* Submit all commands queued on @req to the device command queue.
 *
 * The completion callback is attached to the request's last command so
 * it fires once the whole request has been processed.  Returns 0 on
 * success, -ENODATA for an empty request, or the error recorded while
 * the request was being built (in which case the queued commands are
 * discarded).
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Completion is signalled from the last command's skb */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice the whole request atomically onto the command queue */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2976
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002977static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002978 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979{
2980 int len = HCI_COMMAND_HDR_SIZE + plen;
2981 struct hci_command_hdr *hdr;
2982 struct sk_buff *skb;
2983
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002985 if (!skb)
2986 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987
2988 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002989 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 hdr->plen = plen;
2991
2992 if (plen)
2993 memcpy(skb_put(skb, plen), param, plen);
2994
2995 BT_DBG("skb len %d", skb->len);
2996
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002997 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002998
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002999 return skb;
3000}
3001
/* Send a single stand-alone HCI command to the controller.
 *
 * Builds the command packet, marks it as the start of its own
 * single-command request and queues it on the device command queue.
 * Returns 0 on success or -ENOMEM if the packet cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026
Johan Hedberg71c76a12013-03-05 20:37:46 +02003027/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003028void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3029 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003030{
3031 struct hci_dev *hdev = req->hdev;
3032 struct sk_buff *skb;
3033
3034 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3035
Andre Guedes34739c12013-03-08 11:20:18 -03003036 /* If an error occured during request building, there is no point in
3037 * queueing the HCI command. We can simply return.
3038 */
3039 if (req->err)
3040 return;
3041
Johan Hedberg71c76a12013-03-05 20:37:46 +02003042 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3043 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003044 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3045 hdev->name, opcode);
3046 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003047 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003048 }
3049
3050 if (skb_queue_empty(&req->cmd_q))
3051 bt_cb(skb)->req.start = true;
3052
Johan Hedberg02350a72013-04-03 21:50:29 +03003053 bt_cb(skb)->req.event = event;
3054
Johan Hedberg71c76a12013-03-05 20:37:46 +02003055 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003056}
3057
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003058void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3059 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003060{
3061 hci_req_add_ev(req, opcode, plen, param, 0);
3062}
3063
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003065void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066{
3067 struct hci_command_hdr *hdr;
3068
3069 if (!hdev->sent_cmd)
3070 return NULL;
3071
3072 hdr = (void *) hdev->sent_cmd->data;
3073
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003074 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075 return NULL;
3076
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003077 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
3079 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3080}
3081
3082/* Send ACL data */
3083static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3084{
3085 struct hci_acl_hdr *hdr;
3086 int len = skb->len;
3087
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003088 skb_push(skb, HCI_ACL_HDR_SIZE);
3089 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003090 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003091 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3092 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093}
3094
/* Prepare an outgoing ACL frame and append it to @queue.
 *
 * skb->len is trimmed to the head fragment; any further fragments stay
 * chained on frag_list and are queued individually below.  The ACL
 * header is added to the first fragment here, and continuation
 * fragments get their own headers with ACL_CONT set.  The whole
 * fragment train is queued under the queue lock so another writer
 * cannot interleave its own fragments.
 *
 * NOTE(review): on an unknown dev_type the skb is neither queued nor
 * freed here — presumably ownership stays with the caller; confirm.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the skb accounting to the head fragment; the rest
	 * remains reachable via frag_list. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		/* BR/EDR uses the connection handle... */
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* ...while AMP uses the logical channel handle. */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Every fragment after the first is a continuation */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3152
3153void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3154{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003155 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003156
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003157 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003158
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003159 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003161 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163
3164/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003165void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166{
3167 struct hci_dev *hdev = conn->hdev;
3168 struct hci_sco_hdr hdr;
3169
3170 BT_DBG("%s len %d", hdev->name, skb->len);
3171
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003172 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 hdr.dlen = skb->len;
3174
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003175 skb_push(skb, HCI_SCO_HDR_SIZE);
3176 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003177 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003179 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003180
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003182 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184
3185/* ---- HCI TX task (outgoing data) ---- */
3186
3187/* HCI Connection scheduler */
/* Connection scheduler for link types with per-connection queues
 * (SCO/eSCO here).  Pick the connection of @type that has queued data
 * and the fewest packets already in flight (c->sent) — least-sent
 * wins, giving round-robin-like fairness.  *quote is set to the
 * controller's free buffer count divided evenly among the competing
 * connections, but never less than 1 for the chosen connection.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only connections of the requested type with data queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-sent candidate */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type visited — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool (le_mtu == 0)
			 * LE traffic shares the ACL buffers. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the credits, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3247
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003248static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249{
3250 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003251 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252
Ville Tervobae1f5d92011-02-10 22:38:53 -03003253 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003255 rcu_read_lock();
3256
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003258 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003259 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003260 BT_ERR("%s killing stalled connection %pMR",
3261 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003262 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263 }
3264 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003265
3266 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267}
3268
/* Channel scheduler.  Walk all channels belonging to connections of
 * @type and select one in two dimensions:
 *  - priority: only channels whose head packet carries the highest
 *    priority seen so far are candidates; seeing a higher priority
 *    resets the candidate set;
 *  - fairness: among equal-priority candidates, the channel on the
 *    connection with the fewest packets in flight (conn->sent) wins.
 * *quote becomes the free buffer/block count divided by the number of
 * competing equal-priority channels, but at least 1.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart the
				 * candidate accounting at this level. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins the tie */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited — stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free credit pool for the chosen channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Without a dedicated LE pool, LE shares ACL buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the credits, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3350
/* Anti-starvation pass, run after a scheduling round changed the
 * credit count.  For every channel of connections of @type:
 *  - a channel that sent something this round gets its per-round
 *    counter reset;
 *  - a channel that sent nothing but has data queued gets its head
 *    packet promoted to HCI_PRIO_MAX - 1 so hci_chan_sent() will
 *    consider it in the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced this round: just reset the counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3400
/* Number of controller buffer blocks consumed by @skb's ACL payload
 * (the ACL header itself is not counted), rounded up to whole blocks.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3406
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003407static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 if (!test_bit(HCI_RAW, &hdev->flags)) {
3410 /* ACL tx timeout must be longer than maximum
3411 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003412 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003413 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003414 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003416}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417
/* Packet-based ACL scheduling: while ACL credits remain, let
 * hci_chan_sent() pick the next channel and drain up to its quota from
 * that channel's queue, stopping early if a lower-priority packet
 * reaches the head.  If any credits were consumed, run the priority
 * recalculation pass to un-starve idle channels.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* credits before this round */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3455
/* Block-based ACL scheduling (AMP-style flow control): like
 * hci_sched_acl_pkt() but credits are buffer blocks, and one packet may
 * consume several blocks (__get_blocks()).  On AMP controllers the
 * scheduled link type is AMP_LINK instead of ACL_LINK.
 *
 * NOTE(review): when a packet needs more blocks than remain, the
 * function returns with the already-dequeued skb neither sent, freed,
 * nor re-queued — looks like the frame is dropped/leaked; confirm.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;	/* blocks before this round */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Accounting is in blocks, not packets */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3509
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003510static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003511{
3512 BT_DBG("%s", hdev->name);
3513
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003514 /* No ACL link over BR/EDR controller */
3515 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3516 return;
3517
3518 /* No AMP link over AMP controller */
3519 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003520 return;
3521
3522 switch (hdev->flow_ctl_mode) {
3523 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3524 hci_sched_acl_pkt(hdev);
3525 break;
3526
3527 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3528 hci_sched_acl_blk(hdev);
3529 break;
3530 }
3531}
3532
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003534static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535{
3536 struct hci_conn *conn;
3537 struct sk_buff *skb;
3538 int quote;
3539
3540 BT_DBG("%s", hdev->name);
3541
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003542 if (!hci_conn_num(hdev, SCO_LINK))
3543 return;
3544
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3546 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3547 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003548 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549
3550 conn->sent++;
3551 if (conn->sent == ~0)
3552 conn->sent = 0;
3553 }
3554 }
3555}
3556
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003557static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003558{
3559 struct hci_conn *conn;
3560 struct sk_buff *skb;
3561 int quote;
3562
3563 BT_DBG("%s", hdev->name);
3564
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003565 if (!hci_conn_num(hdev, ESCO_LINK))
3566 return;
3567
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003568 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3569 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003570 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3571 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003572 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003573
3574 conn->sent++;
3575 if (conn->sent == ~0)
3576 conn->sent = 0;
3577 }
3578 }
3579}
3580
/* LE scheduling.  Controllers without dedicated LE buffers
 * (le_pkts == 0) share the ACL credit pool, so the working counter is
 * loaded from — and written back to — whichever pool applies.  Per
 * channel, the same priority-window draining as the ACL scheduler is
 * used, followed by the anti-starvation pass if anything was sent.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool if the controller has one, else share ACL */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember the starting credit count */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3631
/* TX worker: run the per-link-type schedulers and then flush any raw
 * (unknown type) packets.  In user channel mode all scheduling is
 * bypassed and only the raw queue is drained.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3652
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003653/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654
3655/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003656static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657{
3658 struct hci_acl_hdr *hdr = (void *) skb->data;
3659 struct hci_conn *conn;
3660 __u16 handle, flags;
3661
3662 skb_pull(skb, HCI_ACL_HDR_SIZE);
3663
3664 handle = __le16_to_cpu(hdr->handle);
3665 flags = hci_flags(handle);
3666 handle = hci_handle(handle);
3667
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003668 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003669 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670
3671 hdev->stat.acl_rx++;
3672
3673 hci_dev_lock(hdev);
3674 conn = hci_conn_hash_lookup_handle(hdev, handle);
3675 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003676
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003678 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003679
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003681 l2cap_recv_acldata(conn, skb, flags);
3682 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003684 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003685 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 }
3687
3688 kfree_skb(skb);
3689}
3690
3691/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003692static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693{
3694 struct hci_sco_hdr *hdr = (void *) skb->data;
3695 struct hci_conn *conn;
3696 __u16 handle;
3697
3698 skb_pull(skb, HCI_SCO_HDR_SIZE);
3699
3700 handle = __le16_to_cpu(hdr->handle);
3701
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003702 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703
3704 hdev->stat.sco_rx++;
3705
3706 hci_dev_lock(hdev);
3707 conn = hci_conn_hash_lookup_handle(hdev, handle);
3708 hci_dev_unlock(hdev);
3709
3710 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003712 sco_recv_scodata(conn, skb);
3713 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003715 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003716 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 }
3718
3719 kfree_skb(skb);
3720}
3721
Johan Hedberg9238f362013-03-05 20:37:48 +02003722static bool hci_req_is_complete(struct hci_dev *hdev)
3723{
3724 struct sk_buff *skb;
3725
3726 skb = skb_peek(&hdev->cmd_q);
3727 if (!skb)
3728 return true;
3729
3730 return bt_cb(skb)->req.start;
3731}
3732
Johan Hedberg42c6b122013-03-05 20:37:49 +02003733static void hci_resend_last(struct hci_dev *hdev)
3734{
3735 struct hci_command_hdr *sent;
3736 struct sk_buff *skb;
3737 u16 opcode;
3738
3739 if (!hdev->sent_cmd)
3740 return;
3741
3742 sent = (void *) hdev->sent_cmd->data;
3743 opcode = __le16_to_cpu(sent->opcode);
3744 if (opcode == HCI_OP_RESET)
3745 return;
3746
3747 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3748 if (!skb)
3749 return;
3750
3751 skb_queue_head(&hdev->cmd_q, skb);
3752 queue_work(hdev->workqueue, &hdev->cmd_work);
3753}
3754
/* Drive HCI request completion when a command status/complete event for
 * @opcode arrives with @status.  Finds the request's completion
 * callback — either on the sent command itself or on a queued command —
 * runs it exactly once, and discards the remaining commands of a failed
 * request up to the start of the next one.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request
	 * (stop at the first command that starts the next request). */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The last discarded command's callback wins */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3820
/* RX worker: drain hdev->rx_q and dispatch each packet.  Every packet
 * is first copied to the monitor socket (and, in promiscuous mode, to
 * raw sockets); raw/user-channel devices consume nothing further.
 * During controller init, data packets are dropped and only events are
 * processed.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw and user-channel devices bypass the stack */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3876
/* Command worker: if the controller has a free command credit, send
 * the next queued HCI command.  A clone is kept in hdev->sent_cmd so
 * the completion handler can match the response; if cloning fails the
 * command is put back and the worker rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: retry this command later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}