/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070032#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
/* Work handlers driving the RX, command and TX queues (defined below). */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
51
/* ---- HCI notifications ---- */

/* Forward a device state event to the HCI socket layer so that
 * monitoring sockets learn about it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070059/* ---- HCI debugfs entries ---- */
60
Marcel Holtmann70afe0b2013-10-17 17:24:14 -070061static int blacklist_show(struct seq_file *f, void *p)
62{
63 struct hci_dev *hdev = f->private;
64 struct bdaddr_list *b;
65
66 hci_dev_lock(hdev);
67 list_for_each_entry(b, &hdev->blacklist, list)
68 seq_printf(f, "%pMR\n", &b->bdaddr);
69 hci_dev_unlock(hdev);
70
71 return 0;
72}
73
74static int blacklist_open(struct inode *inode, struct file *file)
75{
76 return single_open(file, blacklist_show, inode->i_private);
77}
78
79static const struct file_operations blacklist_fops = {
80 .open = blacklist_open,
81 .read = seq_read,
82 .llseek = seq_lseek,
83 .release = single_release,
84};
85
Marcel Holtmann47219832013-10-17 17:24:15 -070086static int uuids_show(struct seq_file *f, void *p)
87{
88 struct hci_dev *hdev = f->private;
89 struct bt_uuid *uuid;
90
91 hci_dev_lock(hdev);
92 list_for_each_entry(uuid, &hdev->uuids, list) {
93 u32 data0, data5;
94 u16 data1, data2, data3, data4;
95
96 data5 = get_unaligned_le32(uuid);
97 data4 = get_unaligned_le16(uuid + 4);
98 data3 = get_unaligned_le16(uuid + 6);
99 data2 = get_unaligned_le16(uuid + 8);
100 data1 = get_unaligned_le16(uuid + 10);
101 data0 = get_unaligned_le32(uuid + 12);
102
103 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
104 data0, data1, data2, data3, data4, data5);
105 }
106 hci_dev_unlock(hdev);
107
108 return 0;
109}
110
111static int uuids_open(struct inode *inode, struct file *file)
112{
113 return single_open(file, uuids_show, inode->i_private);
114}
115
116static const struct file_operations uuids_fops = {
117 .open = uuids_open,
118 .read = seq_read,
119 .llseek = seq_lseek,
120 .release = single_release,
121};
122
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700123static int inquiry_cache_show(struct seq_file *f, void *p)
124{
125 struct hci_dev *hdev = f->private;
126 struct discovery_state *cache = &hdev->discovery;
127 struct inquiry_entry *e;
128
129 hci_dev_lock(hdev);
130
131 list_for_each_entry(e, &cache->all, all) {
132 struct inquiry_data *data = &e->data;
133 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
134 &data->bdaddr,
135 data->pscan_rep_mode, data->pscan_period_mode,
136 data->pscan_mode, data->dev_class[2],
137 data->dev_class[1], data->dev_class[0],
138 __le16_to_cpu(data->clock_offset),
139 data->rssi, data->ssp_mode, e->timestamp);
140 }
141
142 hci_dev_unlock(hdev);
143
144 return 0;
145}
146
147static int inquiry_cache_open(struct inode *inode, struct file *file)
148{
149 return single_open(file, inquiry_cache_show, inode->i_private);
150}
151
152static const struct file_operations inquiry_cache_fops = {
153 .open = inquiry_cache_open,
154 .read = seq_read,
155 .llseek = seq_lseek,
156 .release = single_release,
157};
158
Marcel Holtmann041000b2013-10-17 12:02:31 -0700159static int voice_setting_get(void *data, u64 *val)
160{
161 struct hci_dev *hdev = data;
162
163 hci_dev_lock(hdev);
164 *val = hdev->voice_setting;
165 hci_dev_unlock(hdev);
166
167 return 0;
168}
169
170DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
171 NULL, "0x%4.4llx\n");
172
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700173static int auto_accept_delay_set(void *data, u64 val)
174{
175 struct hci_dev *hdev = data;
176
177 hci_dev_lock(hdev);
178 hdev->auto_accept_delay = val;
179 hci_dev_unlock(hdev);
180
181 return 0;
182}
183
184static int auto_accept_delay_get(void *data, u64 *val)
185{
186 struct hci_dev *hdev = data;
187
188 hci_dev_lock(hdev);
189 *val = hdev->auto_accept_delay;
190 hci_dev_unlock(hdev);
191
192 return 0;
193}
194
195DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
196 auto_accept_delay_set, "%llu\n");
197
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700198static int static_address_show(struct seq_file *f, void *p)
199{
200 struct hci_dev *hdev = f->private;
201
202 hci_dev_lock(hdev);
203 seq_printf(f, "%pMR\n", &hdev->static_addr);
204 hci_dev_unlock(hdev);
205
206 return 0;
207}
208
209static int static_address_open(struct inode *inode, struct file *file)
210{
211 return single_open(file, static_address_show, inode->i_private);
212}
213
214static const struct file_operations static_address_fops = {
215 .open = static_address_open,
216 .read = seq_read,
217 .llseek = seq_lseek,
218 .release = single_release,
219};
220
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221/* ---- HCI requests ---- */
222
Johan Hedberg42c6b122013-03-05 20:37:49 +0200223static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200225 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
227 if (hdev->req_status == HCI_REQ_PEND) {
228 hdev->req_result = result;
229 hdev->req_status = HCI_REQ_DONE;
230 wake_up_interruptible(&hdev->req_wait_q);
231 }
232}
233
234static void hci_req_cancel(struct hci_dev *hdev, int err)
235{
236 BT_DBG("%s err 0x%2.2x", hdev->name, err);
237
238 if (hdev->req_status == HCI_REQ_PEND) {
239 hdev->req_result = err;
240 hdev->req_status = HCI_REQ_CANCELED;
241 wake_up_interruptible(&hdev->req_wait_q);
242 }
243}
244
Fengguang Wu77a63e02013-04-20 16:24:31 +0300245static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
246 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300247{
248 struct hci_ev_cmd_complete *ev;
249 struct hci_event_hdr *hdr;
250 struct sk_buff *skb;
251
252 hci_dev_lock(hdev);
253
254 skb = hdev->recv_evt;
255 hdev->recv_evt = NULL;
256
257 hci_dev_unlock(hdev);
258
259 if (!skb)
260 return ERR_PTR(-ENODATA);
261
262 if (skb->len < sizeof(*hdr)) {
263 BT_ERR("Too short HCI event");
264 goto failed;
265 }
266
267 hdr = (void *) skb->data;
268 skb_pull(skb, HCI_EVENT_HDR_SIZE);
269
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300270 if (event) {
271 if (hdr->evt != event)
272 goto failed;
273 return skb;
274 }
275
Johan Hedberg75e84b72013-04-02 13:35:04 +0300276 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
277 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
278 goto failed;
279 }
280
281 if (skb->len < sizeof(*ev)) {
282 BT_ERR("Too short cmd_complete event");
283 goto failed;
284 }
285
286 ev = (void *) skb->data;
287 skb_pull(skb, sizeof(*ev));
288
289 if (opcode == __le16_to_cpu(ev->opcode))
290 return skb;
291
292 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
293 __le16_to_cpu(ev->opcode));
294
295failed:
296 kfree_skb(skb);
297 return ERR_PTR(-ENODATA);
298}
299
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300300struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300301 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300302{
303 DECLARE_WAITQUEUE(wait, current);
304 struct hci_request req;
305 int err = 0;
306
307 BT_DBG("%s", hdev->name);
308
309 hci_req_init(&req, hdev);
310
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300311 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300312
313 hdev->req_status = HCI_REQ_PEND;
314
315 err = hci_req_run(&req, hci_req_sync_complete);
316 if (err < 0)
317 return ERR_PTR(err);
318
319 add_wait_queue(&hdev->req_wait_q, &wait);
320 set_current_state(TASK_INTERRUPTIBLE);
321
322 schedule_timeout(timeout);
323
324 remove_wait_queue(&hdev->req_wait_q, &wait);
325
326 if (signal_pending(current))
327 return ERR_PTR(-EINTR);
328
329 switch (hdev->req_status) {
330 case HCI_REQ_DONE:
331 err = -bt_to_errno(hdev->req_result);
332 break;
333
334 case HCI_REQ_CANCELED:
335 err = -hdev->req_result;
336 break;
337
338 default:
339 err = -ETIMEDOUT;
340 break;
341 }
342
343 hdev->req_status = hdev->req_result = 0;
344
345 BT_DBG("%s end: err %d", hdev->name, err);
346
347 if (err < 0)
348 return ERR_PTR(err);
349
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300350 return hci_get_cmd_complete(hdev, opcode, event);
351}
352EXPORT_SYMBOL(__hci_cmd_sync_ev);
353
354struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300355 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300356{
357 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300358}
359EXPORT_SYMBOL(__hci_cmd_sync);
360
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200362static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200363 void (*func)(struct hci_request *req,
364 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200365 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200367 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368 DECLARE_WAITQUEUE(wait, current);
369 int err = 0;
370
371 BT_DBG("%s start", hdev->name);
372
Johan Hedberg42c6b122013-03-05 20:37:49 +0200373 hci_req_init(&req, hdev);
374
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 hdev->req_status = HCI_REQ_PEND;
376
Johan Hedberg42c6b122013-03-05 20:37:49 +0200377 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200378
Johan Hedberg42c6b122013-03-05 20:37:49 +0200379 err = hci_req_run(&req, hci_req_sync_complete);
380 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200381 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300382
383 /* ENODATA means the HCI request command queue is empty.
384 * This can happen when a request with conditionals doesn't
385 * trigger any commands to be sent. This is normal behavior
386 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200387 */
Andre Guedes920c8302013-03-08 11:20:15 -0300388 if (err == -ENODATA)
389 return 0;
390
391 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200392 }
393
Andre Guedesbc4445c2013-03-08 11:20:13 -0300394 add_wait_queue(&hdev->req_wait_q, &wait);
395 set_current_state(TASK_INTERRUPTIBLE);
396
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 schedule_timeout(timeout);
398
399 remove_wait_queue(&hdev->req_wait_q, &wait);
400
401 if (signal_pending(current))
402 return -EINTR;
403
404 switch (hdev->req_status) {
405 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700406 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 break;
408
409 case HCI_REQ_CANCELED:
410 err = -hdev->req_result;
411 break;
412
413 default:
414 err = -ETIMEDOUT;
415 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700416 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417
Johan Hedberga5040ef2011-01-10 13:28:59 +0200418 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419
420 BT_DBG("%s end: err %d", hdev->name, err);
421
422 return err;
423}
424
Johan Hedberg01178cd2013-03-05 20:37:41 +0200425static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200426 void (*req)(struct hci_request *req,
427 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200428 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429{
430 int ret;
431
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200432 if (!test_bit(HCI_UP, &hdev->flags))
433 return -ENETDOWN;
434
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435 /* Serialize all requests */
436 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200437 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438 hci_req_unlock(hdev);
439
440 return ret;
441}
442
Johan Hedberg42c6b122013-03-05 20:37:49 +0200443static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200445 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446
447 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200448 set_bit(HCI_RESET, &req->hdev->flags);
449 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450}
451
Johan Hedberg42c6b122013-03-05 20:37:49 +0200452static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700453{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200454 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200455
Linus Torvalds1da177e2005-04-16 15:20:36 -0700456 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200457 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200459 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200460 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200461
462 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200463 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464}
465
Johan Hedberg42c6b122013-03-05 20:37:49 +0200466static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200467{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200468 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200469
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200470 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200471 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300472
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700473 /* Read Local Supported Commands */
474 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
475
476 /* Read Local Supported Features */
477 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
478
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300479 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200480 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300481
482 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200483 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700484
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700485 /* Read Flow Control Mode */
486 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
487
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700488 /* Read Location Data */
489 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200490}
491
Johan Hedberg42c6b122013-03-05 20:37:49 +0200492static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200493{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200494 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200495
496 BT_DBG("%s %ld", hdev->name, opt);
497
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300498 /* Reset */
499 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200500 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300501
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200502 switch (hdev->dev_type) {
503 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200504 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200505 break;
506
507 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200508 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200509 break;
510
511 default:
512 BT_ERR("Unknown device type %d", hdev->dev_type);
513 break;
514 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200515}
516
Johan Hedberg42c6b122013-03-05 20:37:49 +0200517static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200518{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700519 struct hci_dev *hdev = req->hdev;
520
Johan Hedberg2177bab2013-03-05 20:37:43 +0200521 __le16 param;
522 __u8 flt_type;
523
524 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200525 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200526
527 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200528 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200529
530 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200531 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200532
533 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200534 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200535
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700536 /* Read Number of Supported IAC */
537 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
538
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700539 /* Read Current IAC LAP */
540 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
541
Johan Hedberg2177bab2013-03-05 20:37:43 +0200542 /* Clear Event Filters */
543 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200544 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200545
546 /* Connection accept timeout ~20 secs */
547 param = __constant_cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200548 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200549
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700550 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
551 * but it does not support page scan related HCI commands.
552 */
553 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -0500554 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
555 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
556 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200557}
558
Johan Hedberg42c6b122013-03-05 20:37:49 +0200559static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200560{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300561 struct hci_dev *hdev = req->hdev;
562
Johan Hedberg2177bab2013-03-05 20:37:43 +0200563 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200564 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200565
566 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200567 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200568
569 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200570 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200571
572 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200573 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200574
575 /* Read LE Supported States */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200576 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300577
578 /* LE-only controllers have LE implicitly enabled */
579 if (!lmp_bredr_capable(hdev))
580 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200581}
582
583static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
584{
585 if (lmp_ext_inq_capable(hdev))
586 return 0x02;
587
588 if (lmp_inq_rssi_capable(hdev))
589 return 0x01;
590
591 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
592 hdev->lmp_subver == 0x0757)
593 return 0x01;
594
595 if (hdev->manufacturer == 15) {
596 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
597 return 0x01;
598 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
599 return 0x01;
600 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
601 return 0x01;
602 }
603
604 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
605 hdev->lmp_subver == 0x1805)
606 return 0x01;
607
608 return 0x00;
609}
610
Johan Hedberg42c6b122013-03-05 20:37:49 +0200611static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200612{
613 u8 mode;
614
Johan Hedberg42c6b122013-03-05 20:37:49 +0200615 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200616
Johan Hedberg42c6b122013-03-05 20:37:49 +0200617 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200618}
619
Johan Hedberg42c6b122013-03-05 20:37:49 +0200620static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200621{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200622 struct hci_dev *hdev = req->hdev;
623
Johan Hedberg2177bab2013-03-05 20:37:43 +0200624 /* The second byte is 0xff instead of 0x9f (two reserved bits
625 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
626 * command otherwise.
627 */
628 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
629
630 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
631 * any event mask for pre 1.2 devices.
632 */
633 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
634 return;
635
636 if (lmp_bredr_capable(hdev)) {
637 events[4] |= 0x01; /* Flow Specification Complete */
638 events[4] |= 0x02; /* Inquiry Result with RSSI */
639 events[4] |= 0x04; /* Read Remote Extended Features Complete */
640 events[5] |= 0x08; /* Synchronous Connection Complete */
641 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700642 } else {
643 /* Use a different default for LE-only devices */
644 memset(events, 0, sizeof(events));
645 events[0] |= 0x10; /* Disconnection Complete */
646 events[0] |= 0x80; /* Encryption Change */
647 events[1] |= 0x08; /* Read Remote Version Information Complete */
648 events[1] |= 0x20; /* Command Complete */
649 events[1] |= 0x40; /* Command Status */
650 events[1] |= 0x80; /* Hardware Error */
651 events[2] |= 0x04; /* Number of Completed Packets */
652 events[3] |= 0x02; /* Data Buffer Overflow */
653 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +0200654 }
655
656 if (lmp_inq_rssi_capable(hdev))
657 events[4] |= 0x02; /* Inquiry Result with RSSI */
658
659 if (lmp_sniffsubr_capable(hdev))
660 events[5] |= 0x20; /* Sniff Subrating */
661
662 if (lmp_pause_enc_capable(hdev))
663 events[5] |= 0x80; /* Encryption Key Refresh Complete */
664
665 if (lmp_ext_inq_capable(hdev))
666 events[5] |= 0x40; /* Extended Inquiry Result */
667
668 if (lmp_no_flush_capable(hdev))
669 events[7] |= 0x01; /* Enhanced Flush Complete */
670
671 if (lmp_lsto_capable(hdev))
672 events[6] |= 0x80; /* Link Supervision Timeout Changed */
673
674 if (lmp_ssp_capable(hdev)) {
675 events[6] |= 0x01; /* IO Capability Request */
676 events[6] |= 0x02; /* IO Capability Response */
677 events[6] |= 0x04; /* User Confirmation Request */
678 events[6] |= 0x08; /* User Passkey Request */
679 events[6] |= 0x10; /* Remote OOB Data Request */
680 events[6] |= 0x20; /* Simple Pairing Complete */
681 events[7] |= 0x04; /* User Passkey Notification */
682 events[7] |= 0x08; /* Keypress Notification */
683 events[7] |= 0x10; /* Remote Host Supported
684 * Features Notification
685 */
686 }
687
688 if (lmp_le_capable(hdev))
689 events[7] |= 0x20; /* LE Meta-Event */
690
Johan Hedberg42c6b122013-03-05 20:37:49 +0200691 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200692
693 if (lmp_le_capable(hdev)) {
694 memset(events, 0, sizeof(events));
695 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200696 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
697 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200698 }
699}
700
Johan Hedberg42c6b122013-03-05 20:37:49 +0200701static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200702{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200703 struct hci_dev *hdev = req->hdev;
704
Johan Hedberg2177bab2013-03-05 20:37:43 +0200705 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200706 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300707 else
708 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200709
710 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200711 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200712
Johan Hedberg42c6b122013-03-05 20:37:49 +0200713 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200714
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300715 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
716 * local supported commands HCI command.
717 */
718 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200719 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200720
721 if (lmp_ssp_capable(hdev)) {
722 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
723 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200724 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
725 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200726 } else {
727 struct hci_cp_write_eir cp;
728
729 memset(hdev->eir, 0, sizeof(hdev->eir));
730 memset(&cp, 0, sizeof(cp));
731
Johan Hedberg42c6b122013-03-05 20:37:49 +0200732 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200733 }
734 }
735
736 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200737 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200738
739 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200740 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200741
742 if (lmp_ext_feat_capable(hdev)) {
743 struct hci_cp_read_local_ext_features cp;
744
745 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200746 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
747 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200748 }
749
750 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
751 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200752 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
753 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200754 }
755}
756
Johan Hedberg42c6b122013-03-05 20:37:49 +0200757static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200758{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200759 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200760 struct hci_cp_write_def_link_policy cp;
761 u16 link_policy = 0;
762
763 if (lmp_rswitch_capable(hdev))
764 link_policy |= HCI_LP_RSWITCH;
765 if (lmp_hold_capable(hdev))
766 link_policy |= HCI_LP_HOLD;
767 if (lmp_sniff_capable(hdev))
768 link_policy |= HCI_LP_SNIFF;
769 if (lmp_park_capable(hdev))
770 link_policy |= HCI_LP_PARK;
771
772 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200773 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200774}
775
Johan Hedberg42c6b122013-03-05 20:37:49 +0200776static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200777{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200778 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200779 struct hci_cp_write_le_host_supported cp;
780
Johan Hedbergc73eee92013-04-19 18:35:21 +0300781 /* LE-only devices do not support explicit enablement */
782 if (!lmp_bredr_capable(hdev))
783 return;
784
Johan Hedberg2177bab2013-03-05 20:37:43 +0200785 memset(&cp, 0, sizeof(cp));
786
787 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
788 cp.le = 0x01;
789 cp.simul = lmp_le_br_capable(hdev);
790 }
791
792 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200793 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
794 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200795}
796
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300797static void hci_set_event_mask_page_2(struct hci_request *req)
798{
799 struct hci_dev *hdev = req->hdev;
800 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
801
802 /* If Connectionless Slave Broadcast master role is supported
803 * enable all necessary events for it.
804 */
805 if (hdev->features[2][0] & 0x01) {
806 events[1] |= 0x40; /* Triggered Clock Capture */
807 events[1] |= 0x80; /* Synchronization Train Complete */
808 events[2] |= 0x10; /* Slave Page Response Timeout */
809 events[2] |= 0x20; /* CSB Channel Map Change */
810 }
811
812 /* If Connectionless Slave Broadcast slave role is supported
813 * enable all necessary events for it.
814 */
815 if (hdev->features[2][0] & 0x02) {
816 events[2] |= 0x01; /* Synchronization Train Received */
817 events[2] |= 0x02; /* CSB Receive */
818 events[2] |= 0x04; /* CSB Timeout */
819 events[2] |= 0x08; /* Truncated Page Complete */
820 }
821
822 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
823}
824
/* Third stage of controller initialization: commands that depend on
 * the feature/command masks read during the earlier stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wipe all stored link keys on the controller */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy only when the command is supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
863
/* Fourth stage of controller initialization: commands that depend on
 * results from the third stage (extended features, command mask).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
876
/* Run the staged HCI initialization sequence for a newly powered
 * controller and, on the very first power-on, create the debugfs
 * entries. Returns 0 on success or a negative error code.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);

	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}
932
Johan Hedberg42c6b122013-03-05 20:37:49 +0200933static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934{
935 __u8 scan = opt;
936
Johan Hedberg42c6b122013-03-05 20:37:49 +0200937 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938
939 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200940 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941}
942
Johan Hedberg42c6b122013-03-05 20:37:49 +0200943static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944{
945 __u8 auth = opt;
946
Johan Hedberg42c6b122013-03-05 20:37:49 +0200947 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948
949 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200950 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951}
952
Johan Hedberg42c6b122013-03-05 20:37:49 +0200953static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954{
955 __u8 encrypt = opt;
956
Johan Hedberg42c6b122013-03-05 20:37:49 +0200957 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200959 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200960 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961}
962
Johan Hedberg42c6b122013-03-05 20:37:49 +0200963static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200964{
965 __le16 policy = cpu_to_le16(opt);
966
Johan Hedberg42c6b122013-03-05 20:37:49 +0200967 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200968
969 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200970 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200971}
972
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900973/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 * Device is held on return. */
975struct hci_dev *hci_dev_get(int index)
976{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200977 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978
979 BT_DBG("%d", index);
980
981 if (index < 0)
982 return NULL;
983
984 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200985 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 if (d->id == index) {
987 hdev = hci_dev_hold(d);
988 break;
989 }
990 }
991 read_unlock(&hci_dev_list_lock);
992 return hdev;
993}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994
995/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200996
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200997bool hci_discovery_active(struct hci_dev *hdev)
998{
999 struct discovery_state *discov = &hdev->discovery;
1000
Andre Guedes6fbe1952012-02-03 17:47:58 -03001001 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001002 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001003 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001004 return true;
1005
Andre Guedes6fbe1952012-02-03 17:47:58 -03001006 default:
1007 return false;
1008 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001009}
1010
Johan Hedbergff9ef572012-01-04 14:23:45 +02001011void hci_discovery_set_state(struct hci_dev *hdev, int state)
1012{
1013 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1014
1015 if (hdev->discovery.state == state)
1016 return;
1017
1018 switch (state) {
1019 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001020 if (hdev->discovery.state != DISCOVERY_STARTING)
1021 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001022 break;
1023 case DISCOVERY_STARTING:
1024 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001025 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001026 mgmt_discovering(hdev, 1);
1027 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001028 case DISCOVERY_RESOLVING:
1029 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001030 case DISCOVERY_STOPPING:
1031 break;
1032 }
1033
1034 hdev->discovery.state = state;
1035}
1036
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001037void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038{
Johan Hedberg30883512012-01-04 14:16:21 +02001039 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001040 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041
Johan Hedberg561aafb2012-01-04 13:31:59 +02001042 list_for_each_entry_safe(p, n, &cache->all, all) {
1043 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001044 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001046
1047 INIT_LIST_HEAD(&cache->unknown);
1048 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049}
1050
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001051struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1052 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053{
Johan Hedberg30883512012-01-04 14:16:21 +02001054 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 struct inquiry_entry *e;
1056
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001057 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058
Johan Hedberg561aafb2012-01-04 13:31:59 +02001059 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001061 return e;
1062 }
1063
1064 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065}
1066
Johan Hedberg561aafb2012-01-04 13:31:59 +02001067struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001068 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001069{
Johan Hedberg30883512012-01-04 14:16:21 +02001070 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001071 struct inquiry_entry *e;
1072
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001073 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001074
1075 list_for_each_entry(e, &cache->unknown, list) {
1076 if (!bacmp(&e->data.bdaddr, bdaddr))
1077 return e;
1078 }
1079
1080 return NULL;
1081}
1082
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001083struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001084 bdaddr_t *bdaddr,
1085 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001086{
1087 struct discovery_state *cache = &hdev->discovery;
1088 struct inquiry_entry *e;
1089
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001090 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001091
1092 list_for_each_entry(e, &cache->resolve, list) {
1093 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1094 return e;
1095 if (!bacmp(&e->data.bdaddr, bdaddr))
1096 return e;
1097 }
1098
1099 return NULL;
1100}
1101
/* Re-insert @ie into the resolve list at its proper position.
 * Entries appear to be kept ordered by ascending |RSSI| among those
 * without a pending name request; entries already in NAME_PENDING
 * are skipped over so their position is preserved.
 * NOTE(review): ordering rationale inferred from the comparison
 * below — confirm against the name-resolving logic in hci_event.c.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; it is re-added at the computed position below */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		/* pos trails the last entry that sorts before ie */
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1120
/* Insert or refresh an inquiry cache entry for the device described
 * by @data. @name_known tells whether the remote name is already
 * resolved; *@ssp (if non-NULL) is set to the device's SSP mode.
 * Returns true when no further name resolution is needed for this
 * entry, false when the name is still unknown (or allocation failed).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Any stored OOB data is stale once the device is seen again */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP if a previous sighting had it set */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change re-sorts the pending name-resolve list */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN and take it off the
	 * unknown list (unless a name request is already in flight).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1178
1179static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1180{
Johan Hedberg30883512012-01-04 14:16:21 +02001181 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 struct inquiry_info *info = (struct inquiry_info *) buf;
1183 struct inquiry_entry *e;
1184 int copied = 0;
1185
Johan Hedberg561aafb2012-01-04 13:31:59 +02001186 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001188
1189 if (copied >= num)
1190 break;
1191
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 bacpy(&info->bdaddr, &data->bdaddr);
1193 info->pscan_rep_mode = data->pscan_rep_mode;
1194 info->pscan_period_mode = data->pscan_period_mode;
1195 info->pscan_mode = data->pscan_mode;
1196 memcpy(info->dev_class, data->dev_class, 3);
1197 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001198
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001200 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 }
1202
1203 BT_DBG("cache %p, copied %d", cache, copied);
1204 return copied;
1205}
1206
Johan Hedberg42c6b122013-03-05 20:37:49 +02001207static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208{
1209 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001210 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 struct hci_cp_inquiry cp;
1212
1213 BT_DBG("%s", hdev->name);
1214
1215 if (test_bit(HCI_INQUIRY, &hdev->flags))
1216 return;
1217
1218 /* Start Inquiry */
1219 memcpy(&cp.lap, &ir->lap, 3);
1220 cp.length = ir->length;
1221 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001222 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223}
1224
/* Bit-wait action used with wait_on_bit(): sleep until woken, then
 * report whether a signal interrupted the wait (non-zero return
 * aborts the wait loop).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1230
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231int hci_inquiry(void __user *arg)
1232{
1233 __u8 __user *ptr = arg;
1234 struct hci_inquiry_req ir;
1235 struct hci_dev *hdev;
1236 int err = 0, do_inquiry = 0, max_rsp;
1237 long timeo;
1238 __u8 *buf;
1239
1240 if (copy_from_user(&ir, ptr, sizeof(ir)))
1241 return -EFAULT;
1242
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001243 hdev = hci_dev_get(ir.dev_id);
1244 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245 return -ENODEV;
1246
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001247 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1248 err = -EBUSY;
1249 goto done;
1250 }
1251
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001252 if (hdev->dev_type != HCI_BREDR) {
1253 err = -EOPNOTSUPP;
1254 goto done;
1255 }
1256
Johan Hedberg56f87902013-10-02 13:43:13 +03001257 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1258 err = -EOPNOTSUPP;
1259 goto done;
1260 }
1261
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001262 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001263 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001264 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001265 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 do_inquiry = 1;
1267 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001268 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269
Marcel Holtmann04837f62006-07-03 10:02:33 +02001270 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001271
1272 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001273 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1274 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001275 if (err < 0)
1276 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001277
1278 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1279 * cleared). If it is interrupted by a signal, return -EINTR.
1280 */
1281 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1282 TASK_INTERRUPTIBLE))
1283 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001284 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001286 /* for unlimited number of responses we will use buffer with
1287 * 255 entries
1288 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1290
1291 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1292 * copy it to the user space.
1293 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001294 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001295 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 err = -ENOMEM;
1297 goto done;
1298 }
1299
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001300 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001302 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
1304 BT_DBG("num_rsp %d", ir.num_rsp);
1305
1306 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1307 ptr += sizeof(ir);
1308 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001309 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001311 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 err = -EFAULT;
1313
1314 kfree(buf);
1315
1316done:
1317 hci_dev_put(hdev);
1318 return err;
1319}
1320
/* Power on and initialize an HCI device: run pre-flight checks
 * (unregistering, rfkill, address availability), call the driver's
 * open callback, run the vendor setup and the staged HCI init, and
 * on failure unwind everything. Returns 0 or a negative error code.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup runs only during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip the staged init */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1422
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001423/* ---- HCI ioctl helpers ---- */
1424
/* HCIDEVUP ioctl helper: resolve the device index, make sure no
 * pending power work races with the open, then power the device on.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
1454
/* Power off an HCI device: cancel pending work, flush queues and
 * timers, optionally reset the controller, close the driver, and
 * clear the non-persistent state. Safe to call on an already-down
 * device. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Nothing more to do when the device was not up */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up */
	hci_dev_put(hdev);
	return 0;
}
1552
/* ioctl helper: power down the HCI device with index @dev.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices bound to an exclusive user channel may not be closed
	 * through this interface.
	 */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* An explicit close makes a pending automatic power-off redundant */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);	/* balances hci_dev_get() above */
	return err;
}
1576
/* ioctl helper: reset the HCI device with index @dev.
 *
 * Purges pending RX/command queues, flushes the inquiry cache and the
 * connection hash, resets the flow control counters and - unless the
 * device is in raw mode - issues a synchronous HCI Reset request.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* Devices bound to an exclusive user channel may not be reset
	 * through this interface.
	 */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Let the driver flush its own queues as well */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow control: one command credit, no data credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1621
1622int hci_dev_reset_stat(__u16 dev)
1623{
1624 struct hci_dev *hdev;
1625 int ret = 0;
1626
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001627 hdev = hci_dev_get(dev);
1628 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 return -ENODEV;
1630
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001631 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1632 ret = -EBUSY;
1633 goto done;
1634 }
1635
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1637
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001638done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 return ret;
1641}
1642
/* Handler for the device-specific HCI ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...). @arg points to a struct hci_dev_req in user space.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* User-channel devices are off limits for this interface */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These legacy ioctls only apply to BR/EDR capable and enabled
	 * controllers.
	 */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU and packet count into its two
		 * 16-bit halves.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1733
/* Handler for the HCIGETDEVLIST ioctl: report the ids and flags of the
 * registered HCI devices. @arg points to a struct hci_dev_list_req in
 * user space whose dev_num field caps the number of entries returned.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the request so the kernel buffer stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy API access cancels a pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed through mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1780
/* Handler for the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for
 * the device requested in @arg and copy it back to user space.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy API access cancels a pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed through mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Encode bus in the low nibble, controller type in the next one */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report the LE buffer settings in
		 * the ACL fields and no SCO support.
		 */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1829
1830/* ---- Interface to HCI drivers ---- */
1831
/* rfkill callback: soft-block or unblock the controller.
 * @data is the hci_dev passed at rfkill registration time.
 * Returns 0 on success or -EBUSY for user-channel devices.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		/* While HCI_SETUP is set only record the rfkill state;
		 * the close is handled after setup completes.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
1851
/* Operations table handed to the rfkill core */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1855
/* Work item that powers on a controller. Opens the device, re-checks
 * conditions that were deliberately ignored during setup (rfkill,
 * missing address) and either closes the device again or arms the
 * automatic power-off timer.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off automatically unless something keeps
		 * the device in use before the timeout expires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* Leaving the setup phase announces the controller to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1887
/* Delayed work item that powers the controller back off (queued from
 * hci_power_on() when HCI_AUTO_OFF is set).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1897
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001898static void hci_discov_off(struct work_struct *work)
1899{
1900 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001901
1902 hdev = container_of(work, struct hci_dev, discov_off.work);
1903
1904 BT_DBG("%s", hdev->name);
1905
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07001906 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001907}
1908
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001909int hci_uuids_clear(struct hci_dev *hdev)
1910{
Johan Hedberg48210022013-01-27 00:31:28 +02001911 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001912
Johan Hedberg48210022013-01-27 00:31:28 +02001913 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1914 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001915 kfree(uuid);
1916 }
1917
1918 return 0;
1919}
1920
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001921int hci_link_keys_clear(struct hci_dev *hdev)
1922{
1923 struct list_head *p, *n;
1924
1925 list_for_each_safe(p, n, &hdev->link_keys) {
1926 struct link_key *key;
1927
1928 key = list_entry(p, struct link_key, list);
1929
1930 list_del(p);
1931 kfree(key);
1932 }
1933
1934 return 0;
1935}
1936
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001937int hci_smp_ltks_clear(struct hci_dev *hdev)
1938{
1939 struct smp_ltk *k, *tmp;
1940
1941 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1942 list_del(&k->list);
1943 kfree(k);
1944 }
1945
1946 return 0;
1947}
1948
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001949struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1950{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001951 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001952
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001953 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001954 if (bacmp(bdaddr, &k->bdaddr) == 0)
1955 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001956
1957 return NULL;
1958}
1959
/* Decide whether a newly created link key should be stored persistently
 * or only kept for the lifetime of the current connection.
 * @conn may be NULL (pairing without an existing connection).
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1995
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001996struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001997{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001998 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001999
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002000 list_for_each_entry(k, &hdev->long_term_keys, list) {
2001 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002002 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002003 continue;
2004
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002005 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002006 }
2007
2008 return NULL;
2009}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002010
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002011struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002012 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002013{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002014 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002015
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002016 list_for_each_entry(k, &hdev->long_term_keys, list)
2017 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002018 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002019 return k;
2020
2021 return NULL;
2022}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002023
/* Store (or update) the BR/EDR link key for @bdaddr and, when the
 * controller reported a new key (@new_key), notify the management
 * interface. @conn may be NULL. Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Record on the connection whether the key should be kept */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2076
/* Store (or update) an SMP key for @bdaddr/@addr_type and, for a newly
 * created LTK (not an STK), notify the management interface.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only STK and LTK type keys are stored */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short lived and not announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
2113
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002114int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2115{
2116 struct link_key *key;
2117
2118 key = hci_find_link_key(hdev, bdaddr);
2119 if (!key)
2120 return -ENOENT;
2121
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002122 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002123
2124 list_del(&key->list);
2125 kfree(key);
2126
2127 return 0;
2128}
2129
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002130int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2131{
2132 struct smp_ltk *k, *tmp;
2133
2134 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2135 if (bacmp(bdaddr, &k->bdaddr))
2136 continue;
2137
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002138 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002139
2140 list_del(&k->list);
2141 kfree(k);
2142 }
2143
2144 return 0;
2145}
2146
/* HCI command timer function: runs when the controller did not answer
 * the last command in time. Logs the stalled opcode (if one is still
 * outstanding), restores the single command credit and re-kicks the
 * command work so queued commands can proceed.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2164
Szymon Janc2763eda2011-03-22 13:12:22 +01002165struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002166 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002167{
2168 struct oob_data *data;
2169
2170 list_for_each_entry(data, &hdev->remote_oob_data, list)
2171 if (bacmp(bdaddr, &data->bdaddr) == 0)
2172 return data;
2173
2174 return NULL;
2175}
2176
2177int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2178{
2179 struct oob_data *data;
2180
2181 data = hci_find_remote_oob_data(hdev, bdaddr);
2182 if (!data)
2183 return -ENOENT;
2184
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002185 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002186
2187 list_del(&data->list);
2188 kfree(data);
2189
2190 return 0;
2191}
2192
2193int hci_remote_oob_data_clear(struct hci_dev *hdev)
2194{
2195 struct oob_data *data, *n;
2196
2197 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2198 list_del(&data->list);
2199 kfree(data);
2200 }
2201
2202 return 0;
2203}
2204
/* Store (or update) the out-of-band hash and randomizer received for
 * @bdaddr. Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	/* Reuse an existing entry for this address if there is one */
	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2228
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002229struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2230 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002231{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002232 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002233
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002234 list_for_each_entry(b, &hdev->blacklist, list) {
2235 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002236 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002237 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002238
2239 return NULL;
2240}
2241
2242int hci_blacklist_clear(struct hci_dev *hdev)
2243{
2244 struct list_head *p, *n;
2245
2246 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002247 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002248
2249 list_del(p);
2250 kfree(b);
2251 }
2252
2253 return 0;
2254}
2255
/* Add @bdaddr/@type to the device blacklist and notify the management
 * interface. Returns the mgmt notification result on success, -EBADF
 * for BDADDR_ANY, -EEXIST for duplicates or -ENOMEM on allocation
 * failure.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address may not be blacklisted */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
2277
/* Remove @bdaddr/@type from the device blacklist and notify the
 * management interface. Passing BDADDR_ANY clears the whole list
 * instead. Returns the mgmt notification result on success or -ENOENT
 * if no entry matched.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2294
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002295static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002296{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002297 if (status) {
2298 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002299
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002300 hci_dev_lock(hdev);
2301 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2302 hci_dev_unlock(hdev);
2303 return;
2304 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002305}
2306
/* Completion handler for the "disable LE scan" request built by
 * le_scan_disable_work().  Once LE scanning is off, either finish
 * discovery (LE-only) or chain a BR/EDR inquiry (interleaved mode).
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: scanning is off, so discovery is done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Interleaved discovery: follow the LE phase with a
		 * classic inquiry using the general inquiry access code.
		 */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale results before the new inquiry starts */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2349
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002350static void le_scan_disable_work(struct work_struct *work)
2351{
2352 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002353 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002354 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002355 struct hci_request req;
2356 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002357
2358 BT_DBG("%s", hdev->name);
2359
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002360 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002361
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002362 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002363 cp.enable = LE_SCAN_DISABLE;
2364 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002365
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002366 err = hci_req_run(&req, le_scan_disable_work_complete);
2367 if (err)
2368 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002369}
2370
/* Alloc HCI device
 *
 * Allocates and initializes a zeroed hci_dev with conservative default
 * capabilities, empty storage lists, and all work items / queues ready.
 * Returns NULL on allocation failure.  The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Minimal baseline capabilities; drivers/setup may extend these */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* NOTE(review): LE scan values are presumably in controller
	 * slots (0.625 ms units) -- confirm against the core spec.
	 */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device storage lists (keys, UUIDs, blacklist, ...) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Deferred work used by the RX/TX/command paths and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2428
/* Free HCI device
 *
 * Drops the final driver-model reference taken in hci_alloc_dev();
 * the memory itself is released by the device release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2436
/* Register HCI device
 *
 * Assigns an index ("hciN"), creates the driver workqueues and sysfs
 * entries, hooks up rfkill, publishes the device on hci_dev_list and
 * schedules the initial power-on.  Returns the assigned index on
 * success or a negative errno, unwinding everything on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A usable driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high-priority queues: one for the data/command
	 * paths, one for synchronous request processing.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just leaves the
	 * device without a kill switch.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Kick off the initial power-on from the request workqueue */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2529
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): unlinks the device from the global
 * list, closes it, tears down mgmt/rfkill/sysfs/workqueues, clears
 * per-device storage and drops the reference taken at registration.
 * The teardown order below is deliberate; do not reorder casually.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark first so concurrent paths see the device going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be gone after hci_dev_put() */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all per-device storage under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the index only after the last local use of hdev */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2588
/* Suspend HCI device
 *
 * Broadcasts HCI_DEV_SUSPEND to registered notifiers; always succeeds.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2596
/* Resume HCI device
 *
 * Broadcasts HCI_DEV_RESUME to registered notifiers; always succeeds.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2604
Marcel Holtmann76bca882009-11-18 00:40:39 +01002605/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002606int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002607{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002608 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002609 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002610 kfree_skb(skb);
2611 return -ENXIO;
2612 }
2613
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002614 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002615 bt_cb(skb)->incoming = 1;
2616
2617 /* Time stamp */
2618 __net_timestamp(skb);
2619
Marcel Holtmann76bca882009-11-18 00:40:39 +01002620 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002621 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002622
Marcel Holtmann76bca882009-11-18 00:40:39 +01002623 return 0;
2624}
2625EXPORT_SYMBOL(hci_recv_frame);
2626
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 *
 * State (a partially filled skb) lives in hdev->reassembly[index].
 * Bytes from @data are copied in until the packet header and then the
 * full payload are complete; a finished packet is handed to
 * hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0) -- a positive
 * value means a complete packet was delivered and leftover input
 * belongs to the next packet -- or a negative errno (-EILSEQ for a bad
 * type/index, -ENOMEM on allocation failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* New packet: allocate an skb sized for the worst case of
		 * this packet type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and make sure it fits in the allocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2734
Marcel Holtmannef222012007-07-11 06:42:04 +02002735int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2736{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302737 int rem = 0;
2738
Marcel Holtmannef222012007-07-11 06:42:04 +02002739 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2740 return -EILSEQ;
2741
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002742 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002743 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302744 if (rem < 0)
2745 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002746
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302747 data += (count - rem);
2748 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002749 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002750
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302751 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002752}
2753EXPORT_SYMBOL(hci_recv_fragment);
2754
/* Single shared reassembly slot used for stream (H:4 style) input */
#define STREAM_REASSEMBLY 0

/* Reassemble packets from a raw byte stream where each packet is
 * prefixed by its one-byte type indicator.
 *
 * When no packet is in progress the first byte of @data selects the
 * type; otherwise the in-progress skb remembers it.  Returns 0 once
 * all input is consumed or a negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Consume the type byte before reassembly */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2789
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790/* ---- Interface to upper protocols ---- */
2791
/* Register an upper-protocol callback structure on the global
 * hci_cb_list.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2803
/* Remove a previously registered callback structure from the global
 * hci_cb_list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2815
/* Hand one outgoing frame to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor interface (and to raw
 * sockets when in promiscuous mode), then passes ownership to the
 * driver's send() hook.  Failure is only logged.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2837
Johan Hedberg3119ae92013-03-05 20:37:44 +02002838void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2839{
2840 skb_queue_head_init(&req->cmd_q);
2841 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002842 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002843}
2844
/* Submit an accumulated request: splice its commands onto the device
 * command queue and attach @complete to the final command.  Returns 0
 * on success, the deferred build error from hci_req_add*(), or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command's skb */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2876
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002877static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002878 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879{
2880 int len = HCI_COMMAND_HDR_SIZE + plen;
2881 struct hci_command_hdr *hdr;
2882 struct sk_buff *skb;
2883
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002885 if (!skb)
2886 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887
2888 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002889 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 hdr->plen = plen;
2891
2892 if (plen)
2893 memcpy(skb_put(skb, plen), param, plen);
2894
2895 BT_DBG("skb len %d", skb->len);
2896
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002897 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002898
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002899 return skb;
2900}
2901
/* Send HCI command
 *
 * Builds a stand-alone command packet and queues it on the device
 * command queue for the command worker.  Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926
/* Queue a command to an asynchronous HCI request
 *
 * Appends one command to @req's private queue; @event optionally names
 * the HCI event expected to complete it.  Allocation failure is
 * recorded in req->err and reported later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2957
/* Queue a command to an asynchronous HCI request with no special
 * completion event (convenience wrapper around hci_req_add_ev()).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2963
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002965void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966{
2967 struct hci_command_hdr *hdr;
2968
2969 if (!hdev->sent_cmd)
2970 return NULL;
2971
2972 hdr = (void *) hdev->sent_cmd->data;
2973
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002974 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 return NULL;
2976
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002977 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
2979 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2980}
2981
/* Send ACL data */

/* Prepend an ACL header to @skb: handle and packet-boundary/broadcast
 * flags packed into one little-endian field, followed by the payload
 * length captured before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2994
/* Add ACL headers to an outgoing skb (and any fragments hanging off
 * its frag_list) and append everything to @queue.
 *
 * The first fragment keeps the caller's @flags; continuation fragments
 * are re-marked ACL_CONT.  For AMP controllers the channel handle is
 * used instead of the connection handle.  All fragments of one packet
 * are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the skb to its linear head; fragments are handled
	 * individually via frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never carry the start flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3052
/* Queue outgoing ACL data on the channel's data queue and kick the
 * TX worker to transmit it.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063
3064/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003065void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066{
3067 struct hci_dev *hdev = conn->hdev;
3068 struct hci_sco_hdr hdr;
3069
3070 BT_DBG("%s len %d", hdev->name, skb->len);
3071
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003072 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 hdr.dlen = skb->len;
3074
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003075 skb_push(skb, HCI_SCO_HDR_SIZE);
3076 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003077 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003079 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003080
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003082 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084
3085/* ---- HCI TX task (outgoing data) ---- */
3086
3087/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003088static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3089 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090{
3091 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003092 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003093 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003095 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003097
3098 rcu_read_lock();
3099
3100 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003101 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003103
3104 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3105 continue;
3106
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107 num++;
3108
3109 if (c->sent < min) {
3110 min = c->sent;
3111 conn = c;
3112 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003113
3114 if (hci_conn_num(hdev, type) == num)
3115 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 }
3117
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003118 rcu_read_unlock();
3119
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003121 int cnt, q;
3122
3123 switch (conn->type) {
3124 case ACL_LINK:
3125 cnt = hdev->acl_cnt;
3126 break;
3127 case SCO_LINK:
3128 case ESCO_LINK:
3129 cnt = hdev->sco_cnt;
3130 break;
3131 case LE_LINK:
3132 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3133 break;
3134 default:
3135 cnt = 0;
3136 BT_ERR("Unknown link type");
3137 }
3138
3139 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 *quote = q ? q : 1;
3141 } else
3142 *quote = 0;
3143
3144 BT_DBG("conn %p quote %d", conn, *quote);
3145 return conn;
3146}
3147
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003148static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149{
3150 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003151 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152
Ville Tervobae1f5d92011-02-10 22:38:53 -03003153 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003155 rcu_read_lock();
3156
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003158 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003159 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003160 BT_ERR("%s killing stalled connection %pMR",
3161 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003162 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 }
3164 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003165
3166 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167}
3168
/* HCI channel scheduler.
 *
 * Select the next channel to service for link @type: among the
 * channels whose head-of-queue skb has the highest priority, pick the
 * one on the connection with the fewest outstanding packets, and
 * return its share of the controller buffer budget in *quote.
 * Returns NULL when no eligible channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters; lower
			 * priority channels are skipped this round. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a higher priority level: restart the
			 * selection so only channels at this level
			 * compete. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority channels, prefer the
			 * least busy connection. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the budget, but never less than one packet. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3250
/* Priority aging: after a TX round, promote the head skb of every
 * channel of @type that did not get to send anything, so low-priority
 * traffic cannot be starved forever.  Channels that did send have
 * their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel sent this round: clear the counter
			 * and leave its priority untouched. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling. */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3300
/* Number of controller data blocks consumed by @skb; based on payload
 * length only (the ACL header is not counted against the budget). */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3306
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003307static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309 if (!test_bit(HCI_RAW, &hdev->flags)) {
3310 /* ACL tx timeout must be longer than maximum
3311 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003312 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003313 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003314 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003316}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317
/* Packet-based ACL scheduler: hand queued ACL frames to the driver
 * while controller packet credits (acl_cnt) remain.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Disconnect stalled links if the ACL TX timeout has expired. */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent this round: re-age channel priorities. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3355
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003356static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003357{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003358 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003359 struct hci_chan *chan;
3360 struct sk_buff *skb;
3361 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003362 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003363
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003364 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003365
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003366 BT_DBG("%s", hdev->name);
3367
3368 if (hdev->dev_type == HCI_AMP)
3369 type = AMP_LINK;
3370 else
3371 type = ACL_LINK;
3372
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003373 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003374 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003375 u32 priority = (skb_peek(&chan->data_q))->priority;
3376 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3377 int blocks;
3378
3379 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003380 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003381
3382 /* Stop if priority has changed */
3383 if (skb->priority < priority)
3384 break;
3385
3386 skb = skb_dequeue(&chan->data_q);
3387
3388 blocks = __get_blocks(hdev, skb);
3389 if (blocks > hdev->block_cnt)
3390 return;
3391
3392 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003393 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003394
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003395 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003396 hdev->acl_last_tx = jiffies;
3397
3398 hdev->block_cnt -= blocks;
3399 quote -= blocks;
3400
3401 chan->sent += blocks;
3402 chan->conn->sent += blocks;
3403 }
3404 }
3405
3406 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003407 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003408}
3409
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003410static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003411{
3412 BT_DBG("%s", hdev->name);
3413
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003414 /* No ACL link over BR/EDR controller */
3415 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3416 return;
3417
3418 /* No AMP link over AMP controller */
3419 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003420 return;
3421
3422 switch (hdev->flow_ctl_mode) {
3423 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3424 hci_sched_acl_pkt(hdev);
3425 break;
3426
3427 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3428 hci_sched_acl_blk(hdev);
3429 break;
3430 }
3431}
3432
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003434static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435{
3436 struct hci_conn *conn;
3437 struct sk_buff *skb;
3438 int quote;
3439
3440 BT_DBG("%s", hdev->name);
3441
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003442 if (!hci_conn_num(hdev, SCO_LINK))
3443 return;
3444
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3446 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3447 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003448 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449
3450 conn->sent++;
3451 if (conn->sent == ~0)
3452 conn->sent = 0;
3453 }
3454 }
3455}
3456
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003457static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003458{
3459 struct hci_conn *conn;
3460 struct sk_buff *skb;
3461 int quote;
3462
3463 BT_DBG("%s", hdev->name);
3464
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003465 if (!hci_conn_num(hdev, ESCO_LINK))
3466 return;
3467
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003468 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3469 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003470 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3471 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003472 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003473
3474 conn->sent++;
3475 if (conn->sent == ~0)
3476 conn->sent = 0;
3477 }
3478 }
3479}
3480
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003481static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003482{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003483 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003484 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003485 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003486
3487 BT_DBG("%s", hdev->name);
3488
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003489 if (!hci_conn_num(hdev, LE_LINK))
3490 return;
3491
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003492 if (!test_bit(HCI_RAW, &hdev->flags)) {
3493 /* LE tx timeout must be longer than maximum
3494 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003495 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003496 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003497 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003498 }
3499
3500 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003501 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003502 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003503 u32 priority = (skb_peek(&chan->data_q))->priority;
3504 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003505 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003506 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003507
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003508 /* Stop if priority has changed */
3509 if (skb->priority < priority)
3510 break;
3511
3512 skb = skb_dequeue(&chan->data_q);
3513
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003514 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003515 hdev->le_last_tx = jiffies;
3516
3517 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003518 chan->sent++;
3519 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003520 }
3521 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003522
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003523 if (hdev->le_pkts)
3524 hdev->le_cnt = cnt;
3525 else
3526 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003527
3528 if (cnt != tmp)
3529 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003530}
3531
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003532static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003534 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535 struct sk_buff *skb;
3536
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003537 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003538 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539
Marcel Holtmann52de5992013-09-03 18:08:38 -07003540 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3541 /* Schedule queues and send stuff to HCI driver */
3542 hci_sched_acl(hdev);
3543 hci_sched_sco(hdev);
3544 hci_sched_esco(hdev);
3545 hci_sched_le(hdev);
3546 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003547
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 /* Send next queued raw (unknown type) packet */
3549 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003550 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551}
3552
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003553/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554
3555/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003556static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557{
3558 struct hci_acl_hdr *hdr = (void *) skb->data;
3559 struct hci_conn *conn;
3560 __u16 handle, flags;
3561
3562 skb_pull(skb, HCI_ACL_HDR_SIZE);
3563
3564 handle = __le16_to_cpu(hdr->handle);
3565 flags = hci_flags(handle);
3566 handle = hci_handle(handle);
3567
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003568 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003569 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570
3571 hdev->stat.acl_rx++;
3572
3573 hci_dev_lock(hdev);
3574 conn = hci_conn_hash_lookup_handle(hdev, handle);
3575 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003576
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003578 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003579
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003581 l2cap_recv_acldata(conn, skb, flags);
3582 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003584 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003585 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586 }
3587
3588 kfree_skb(skb);
3589}
3590
3591/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003592static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593{
3594 struct hci_sco_hdr *hdr = (void *) skb->data;
3595 struct hci_conn *conn;
3596 __u16 handle;
3597
3598 skb_pull(skb, HCI_SCO_HDR_SIZE);
3599
3600 handle = __le16_to_cpu(hdr->handle);
3601
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003602 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603
3604 hdev->stat.sco_rx++;
3605
3606 hci_dev_lock(hdev);
3607 conn = hci_conn_hash_lookup_handle(hdev, handle);
3608 hci_dev_unlock(hdev);
3609
3610 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003612 sco_recv_scodata(conn, skb);
3613 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003615 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003616 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 }
3618
3619 kfree_skb(skb);
3620}
3621
Johan Hedberg9238f362013-03-05 20:37:48 +02003622static bool hci_req_is_complete(struct hci_dev *hdev)
3623{
3624 struct sk_buff *skb;
3625
3626 skb = skb_peek(&hdev->cmd_q);
3627 if (!skb)
3628 return true;
3629
3630 return bt_cb(skb)->req.start;
3631}
3632
/* Re-queue a clone of the last sent command.  Used when a controller
 * emits a spontaneous reset-complete during init, which would leave
 * the pending command without a completion (see hci_req_cmd_complete).
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	/* Never replay HCI_Reset itself. */
	if (opcode == HCI_OP_RESET)
		return;

	/* Clone failure is silently ignored: best effort only. */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3654
/* Handle completion of the command with @opcode on behalf of the
 * request framework: decide whether the enclosing request is finished
 * and, if so, invoke its completion callback exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Start of the next request: put it back and stop. */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3720
/* RX work: drain hdev->rx_q and dispatch each frame to the monitor,
 * raw sockets and the matching protocol handler. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user-channel mode the kernel stack does not
		 * process frames itself. */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it. */
			kfree_skb(skb);
			break;
		}
	}
}
3776
/* Command work: submit the next queued HCI command to the driver when
 * the controller has a free command credit (cmd_cnt). */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the response path can inspect the
		 * sent command (and resend it if needed). */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no timeout is armed; otherwise
			 * (re)arm the command timeout. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}