/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

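/* Consume the last event received on hdev (hdev->recv_evt) and return it
 * if it matches: either the requested event code or, when event is 0, a
 * Command Complete for the given opcode. On any mismatch the skb is freed
 * and ERR_PTR(-ENODATA) is returned.
 */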
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

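/* Send a single HCI command and sleep until the controller answers with
 * the expected event (or any Command Complete when event is 0) or the
 * timeout expires. Returns the matching reply skb or an ERR_PTR; must be
 * called from a context that may sleep.
 */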
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

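/* Locked wrapper around __hci_req_sync(): fails with -ENETDOWN if the
 * device is not up, and holds the request lock so that synchronous
 * requests are serialized per controller.
 */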
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

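/* Pick the best inquiry mode the controller can handle: 0x02 (inquiry
 * result with extended data), 0x01 (with RSSI) or 0x00 (standard). The
 * explicit manufacturer/revision checks below match controllers that
 * handle RSSI inquiry results without advertising the feature bit.
 */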
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

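/* Construct the page 1 event mask from the controller's feature bits so
 * that only events the controller can actually generate are unmasked,
 * and set the LE event mask for LE-capable controllers.
 */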
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

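/* Set the default link policy to every mode (role switch, hold, sniff,
 * park) that the controller's LMP features claim to support.
 */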
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

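/* Sync the host's HCI_LE_ENABLED flag into the controller via the Write
 * LE Host Supported command. Only applies to dual-mode controllers, and
 * the command is only queued when the value would actually change.
 */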
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

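/* Run the staged controller bring-up: stage 1 (reset plus basic capability
 * reads) applies to all controllers, stages 2-4 only to BR/EDR/LE ones.
 * Debugfs entries are created once, during the initial HCI_SETUP phase.
 */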
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The HCI_BREDR device type covers single-mode LE, single-mode
	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
	 * only need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

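/* Discovery counts as active while devices are being found
 * (DISCOVERY_FINDING) or remote names are being resolved
 * (DISCOVERY_RESOLVING).
 */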
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

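/* Advance the discovery state machine, notifying the management interface
 * when discovery effectively starts or stops.
 */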
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

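/* Re-insert ie into the resolve list, keeping the list ordered by signal
 * strength (smallest absolute RSSI first) while skipping past entries
 * whose name resolution is already pending.
 */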
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

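/* Copy up to num cached inquiry results into buf as an array of struct
 * inquiry_info, the format expected by the HCIINQUIRY ioctl, and return
 * the number of entries copied.
 */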
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

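/* HCIINQUIRY ioctl handler: flush the cache and run a fresh inquiry when
 * the cached results are too old (or a flush was explicitly requested),
 * wait for the inquiry to finish, then copy the cached results to user
 * space.
 */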
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

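/* Bring the controller up: open the transport, run the driver's setup
 * callback (during HCI_SETUP only) and the __hci_init() command sequence,
 * and tear everything back down if any stage fails.
 */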
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001253static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 int ret = 0;
1256
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 BT_DBG("%s %p", hdev->name, hdev);
1258
1259 hci_req_lock(hdev);
1260
Johan Hovold94324962012-03-15 14:48:41 +01001261 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1262 ret = -ENODEV;
1263 goto done;
1264 }
1265
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001266 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1267 /* Check for rfkill but allow the HCI setup stage to
1268 * proceed (which in itself doesn't cause any RF activity).
1269 */
1270 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1271 ret = -ERFKILL;
1272 goto done;
1273 }
1274
1275 /* Check for valid public address or a configured static
1276 * random adddress, but let the HCI setup proceed to
1277 * be able to determine if there is a public address
1278 * or not.
1279 *
1280 * This check is only valid for BR/EDR controllers
1281 * since AMP controllers do not have an address.
1282 */
1283 if (hdev->dev_type == HCI_BREDR &&
1284 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1285 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1286 ret = -EADDRNOTAVAIL;
1287 goto done;
1288 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001289 }
1290
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 if (test_bit(HCI_UP, &hdev->flags)) {
1292 ret = -EALREADY;
1293 goto done;
1294 }
1295
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 if (hdev->open(hdev)) {
1297 ret = -EIO;
1298 goto done;
1299 }
1300
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001301 atomic_set(&hdev->cmd_cnt, 1);
1302 set_bit(HCI_INIT, &hdev->flags);
1303
1304 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1305 ret = hdev->setup(hdev);
1306
1307 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001308 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1309 set_bit(HCI_RAW, &hdev->flags);
1310
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001311 if (!test_bit(HCI_RAW, &hdev->flags) &&
1312 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001313 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 }
1315
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001316 clear_bit(HCI_INIT, &hdev->flags);
1317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 if (!ret) {
1319 hci_dev_hold(hdev);
1320 set_bit(HCI_UP, &hdev->flags);
1321 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001322 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001323 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001324 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001325 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001326 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001327 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001328 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001329 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001331 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001332 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001333 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
1335 skb_queue_purge(&hdev->cmd_q);
1336 skb_queue_purge(&hdev->rx_q);
1337
1338 if (hdev->flush)
1339 hdev->flush(hdev);
1340
1341 if (hdev->sent_cmd) {
1342 kfree_skb(hdev->sent_cmd);
1343 hdev->sent_cmd = NULL;
1344 }
1345
1346 hdev->close(hdev);
1347 hdev->flags = 0;
1348 }
1349
1350done:
1351 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 return ret;
1353}
1354
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001355/* ---- HCI ioctl helpers ---- */
1356
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

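/* Tear down an active controller: flush pending RX/TX/command work,
 * clear discovery state, flush the inquiry cache and connection hash,
 * optionally send an HCI reset, drop all queues and finally hand the
 * transport back to the driver via hdev->close(). Calling this on a
 * device that is already down is safe and simply returns 0.
 */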
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

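/* Back end of the HCIDEVRESET ioctl: reset a running controller
 * without taking the interface down. A minimal user-space sketch
 * (illustrative only, error handling elided):
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(ctl, HCIDEVRESET, 0) < 0)	(reset hci0)
 *		perror("HCIDEVRESET");
 */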
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

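/* Back end of the HCISET* ioctls. Note that for HCISETACLMTU and
 * HCISETSCOMTU the 32-bit dev_opt field carries two packed 16-bit
 * values: on a little-endian host the MTU lives in the upper half and
 * the packet count in the lower half, so a caller would pack them
 * roughly as (illustrative sketch):
 *
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 */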
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

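/* Decide whether a link key should be stored persistently. Legacy
 * keys (type below 0x03) always are, debug combination keys never
 * are, and for the remaining types the decision follows from the
 * authentication requirements both sides advertised during pairing.
 */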
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

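/* Store a new BR/EDR link key, or refresh an existing one, and for
 * genuinely new keys notify the management interface so that user
 * space can decide whether to persist the key.
 */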
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

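/* Typical life cycle as seen from a transport driver (illustrative
 * sketch of a hypothetical driver, not code from this file; my_open,
 * my_close and my_send are assumed driver callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */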
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

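/* Reassemble a partially received HCI packet. One reassembly slot
 * per packet type is kept in hdev->reassembly[]; once the expected
 * length (taken from the event/ACL/SCO header) has been accumulated,
 * the completed frame is handed back to hci_recv_frame(). Returns the
 * number of bytes left unconsumed, or a negative error.
 */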
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
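
/* Illustrative driver-side use of the fragment interface (sketch of a
 * hypothetical driver; buf and len stand for freshly received
 * transport payload of an already known packet type):
 *
 *	rem = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (rem < 0)
 *		BT_ERR("Event reassembly failed (%d)", rem);
 */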

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else {
			type = bt_cb(skb)->pkt_type;
		}

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

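/* Upper protocol layers (for example L2CAP and SCO) hook into
 * connection-level events by registering a struct hci_cb here; the
 * registration itself only links the callback structure into
 * hci_cb_list, the callbacks are invoked from the connection code.
 */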
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723int hci_register_cb(struct hci_cb *cb)
2724{
2725 BT_DBG("%p name %s", cb, cb->name);
2726
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002727 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002729 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730
2731 return 0;
2732}
2733EXPORT_SYMBOL(hci_register_cb);
2734
2735int hci_unregister_cb(struct hci_cb *cb)
2736{
2737 BT_DBG("%p name %s", cb, cb->name);
2738
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002739 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002741 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742
2743 return 0;
2744}
2745EXPORT_SYMBOL(hci_unregister_cb);
2746
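/* Minimal usage sketch of the callback interface, disabled and for
 * illustration only. The security_cfm member is assumed from the
 * struct hci_cb definition in hci_core.h of this era.
 */
#if 0
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x",
	       conn, status, encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,	/* member assumed */
};

/* hci_register_cb(&example_cb) on init,
 * hci_unregister_cb(&example_cb) on exit.
 */
#endif
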
Marcel Holtmann51086992013-10-10 14:54:19 -07002747static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002749 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002751 /* Time stamp */
2752 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002754 /* Send copy to monitor */
2755 hci_send_to_monitor(hdev, skb);
2756
2757 if (atomic_read(&hdev->promisc)) {
2758 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002759 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 }
2761
2762 /* Get rid of skb owner, prior to sending to the driver. */
2763 skb_orphan(skb);
2764
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07002765 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07002766 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767}
2768
Johan Hedberg3119ae92013-03-05 20:37:44 +02002769void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2770{
2771 skb_queue_head_init(&req->cmd_q);
2772 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002773 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002774}
2775
2776int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2777{
2778 struct hci_dev *hdev = req->hdev;
2779 struct sk_buff *skb;
2780 unsigned long flags;
2781
2782 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2783
Andre Guedes5d73e032013-03-08 11:20:16 -03002784 /* If an error occurred during request building, remove all HCI
2785 * commands queued on the HCI request queue.
2786 */
2787 if (req->err) {
2788 skb_queue_purge(&req->cmd_q);
2789 return req->err;
2790 }
2791
Johan Hedberg3119ae92013-03-05 20:37:44 +02002792 /* Do not allow empty requests */
2793 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002794 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002795
2796 skb = skb_peek_tail(&req->cmd_q);
2797 bt_cb(skb)->req.complete = complete;
2798
2799 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2800 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2801 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2802
2803 queue_work(hdev->workqueue, &hdev->cmd_work);
2804
2805 return 0;
2806}
2807
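/* Minimal usage sketch of the request API, disabled and for
 * illustration only: queue one command and run the request;
 * example_req_complete() fires once the last command completes.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_update_name(struct hci_dev *hdev)
{
	struct hci_cp_write_local_name cp;
	struct hci_request req;

	hci_req_init(&req, hdev);
	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
	hci_req_add(&req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);

	return hci_req_run(&req, example_req_complete);
}
#endif
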
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002808static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002809 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810{
2811 int len = HCI_COMMAND_HDR_SIZE + plen;
2812 struct hci_command_hdr *hdr;
2813 struct sk_buff *skb;
2814
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002816 if (!skb)
2817 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818
2819 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002820 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 hdr->plen = plen;
2822
2823 if (plen)
2824 memcpy(skb_put(skb, plen), param, plen);
2825
2826 BT_DBG("skb len %d", skb->len);
2827
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002828 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002829
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002830 return skb;
2831}
2832
2833/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002834int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2835 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002836{
2837 struct sk_buff *skb;
2838
2839 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2840
2841 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2842 if (!skb) {
2843 BT_ERR("%s no memory for command", hdev->name);
2844 return -ENOMEM;
2845 }
2846
Johan Hedberg11714b32013-03-05 20:37:47 +02002847 /* Stand-alone HCI commands must be flagged as
2848 * single-command requests.
2849 */
2850 bt_cb(skb)->req.start = true;
2851
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002853 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854
2855 return 0;
2856}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
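/* Minimal usage sketch, disabled: a stand-alone command outside any
 * request context; it becomes its own single-command request.
 *
 *	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 */
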
Johan Hedberg71c76a12013-03-05 20:37:46 +02002858/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002859void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2860 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002861{
2862 struct hci_dev *hdev = req->hdev;
2863 struct sk_buff *skb;
2864
2865 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2866
Andre Guedes34739c12013-03-08 11:20:18 -03002867 /* If an error occurred during request building, there is no point in
2868 * queueing the HCI command. We can simply return.
2869 */
2870 if (req->err)
2871 return;
2872
Johan Hedberg71c76a12013-03-05 20:37:46 +02002873 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2874 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002875 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2876 hdev->name, opcode);
2877 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002878 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002879 }
2880
2881 if (skb_queue_empty(&req->cmd_q))
2882 bt_cb(skb)->req.start = true;
2883
Johan Hedberg02350a72013-04-03 21:50:29 +03002884 bt_cb(skb)->req.event = event;
2885
Johan Hedberg71c76a12013-03-05 20:37:46 +02002886 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002887}
2888
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002889void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2890 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002891{
2892 hci_req_add_ev(req, opcode, plen, param, 0);
2893}
2894
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002896void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897{
2898 struct hci_command_hdr *hdr;
2899
2900 if (!hdev->sent_cmd)
2901 return NULL;
2902
2903 hdr = (void *) hdev->sent_cmd->data;
2904
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002905 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 return NULL;
2907
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002908 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909
2910 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2911}
2912
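/* Minimal usage sketch, disabled: an event handler retrieving the
 * parameters of the command it answers, mirroring the pattern used by
 * the command-complete handlers in hci_event.c.
 */
#if 0
static void example_cc_write_local_name(struct hci_dev *hdev)
{
	struct hci_cp_write_local_name *sent;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	/* sent->name now points at the name the command carried */
}
#endif
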
2913/* Send ACL data */
2914static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2915{
2916 struct hci_acl_hdr *hdr;
2917 int len = skb->len;
2918
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002919 skb_push(skb, HCI_ACL_HDR_SIZE);
2920 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002921 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002922 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2923 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924}
2925
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002926static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002927 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002929 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 struct hci_dev *hdev = conn->hdev;
2931 struct sk_buff *list;
2932
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002933 skb->len = skb_headlen(skb);
2934 skb->data_len = 0;
2935
2936 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002937
2938 switch (hdev->dev_type) {
2939 case HCI_BREDR:
2940 hci_add_acl_hdr(skb, conn->handle, flags);
2941 break;
2942 case HCI_AMP:
2943 hci_add_acl_hdr(skb, chan->handle, flags);
2944 break;
2945 default:
2946 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2947 return;
2948 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002949
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002950 list = skb_shinfo(skb)->frag_list;
2951 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 /* Non-fragmented */
2953 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2954
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002955 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 } else {
2957 /* Fragmented */
2958 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2959
2960 skb_shinfo(skb)->frag_list = NULL;
2961
2962 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002963 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002965 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002966
2967 flags &= ~ACL_START;
2968 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 do {
2970 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002971
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002972 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002973 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974
2975 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2976
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002977 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 } while (list);
2979
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002980 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002982}
2983
2984void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2985{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002986 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002987
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002988 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002989
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002990 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002992 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994
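/* Minimal usage sketch, disabled: how L2CAP hands off a frame, where
 * chan is the hci_chan bound to the connection (hypothetical here).
 *
 *	hci_send_acl(chan, skb, ACL_START);
 */
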
2995/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002996void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997{
2998 struct hci_dev *hdev = conn->hdev;
2999 struct hci_sco_hdr hdr;
3000
3001 BT_DBG("%s len %d", hdev->name, skb->len);
3002
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003003 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 hdr.dlen = skb->len;
3005
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003006 skb_push(skb, HCI_SCO_HDR_SIZE);
3007 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003008 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003010 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003011
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003013 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015
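/* Minimal usage sketch, disabled: SCO sends take the connection
 * directly, e.g. from sco.c (hcon is hypothetical here).
 *
 *	hci_send_sco(hcon, skb);
 */
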
3016/* ---- HCI TX task (outgoing data) ---- */
3017
3018/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003019static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3020 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021{
3022 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003023 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003024 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003026 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003028
3029 rcu_read_lock();
3030
3031 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003032 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003034
3035 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3036 continue;
3037
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 num++;
3039
3040 if (c->sent < min) {
3041 min = c->sent;
3042 conn = c;
3043 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003044
3045 if (hci_conn_num(hdev, type) == num)
3046 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 }
3048
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003049 rcu_read_unlock();
3050
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003052 int cnt, q;
3053
3054 switch (conn->type) {
3055 case ACL_LINK:
3056 cnt = hdev->acl_cnt;
3057 break;
3058 case SCO_LINK:
3059 case ESCO_LINK:
3060 cnt = hdev->sco_cnt;
3061 break;
3062 case LE_LINK:
3063 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3064 break;
3065 default:
3066 cnt = 0;
3067 BT_ERR("Unknown link type");
3068 }
3069
3070 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 *quote = q ? q : 1;
3072 } else
3073 *quote = 0;
3074
3075 BT_DBG("conn %p quote %d", conn, *quote);
3076 return conn;
3077}
3078
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003079static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080{
3081 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003082 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083
Ville Tervobae1f5d92011-02-10 22:38:53 -03003084 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003086 rcu_read_lock();
3087
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003089 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003090 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003091 BT_ERR("%s killing stalled connection %pMR",
3092 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003093 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 }
3095 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003096
3097 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098}
3099
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003100static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3101 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003102{
3103 struct hci_conn_hash *h = &hdev->conn_hash;
3104 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003105 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003106 struct hci_conn *conn;
3107 int cnt, q, conn_num = 0;
3108
3109 BT_DBG("%s", hdev->name);
3110
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003111 rcu_read_lock();
3112
3113 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003114 struct hci_chan *tmp;
3115
3116 if (conn->type != type)
3117 continue;
3118
3119 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3120 continue;
3121
3122 conn_num++;
3123
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003124 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003125 struct sk_buff *skb;
3126
3127 if (skb_queue_empty(&tmp->data_q))
3128 continue;
3129
3130 skb = skb_peek(&tmp->data_q);
3131 if (skb->priority < cur_prio)
3132 continue;
3133
3134 if (skb->priority > cur_prio) {
3135 num = 0;
3136 min = ~0;
3137 cur_prio = skb->priority;
3138 }
3139
3140 num++;
3141
3142 if (conn->sent < min) {
3143 min = conn->sent;
3144 chan = tmp;
3145 }
3146 }
3147
3148 if (hci_conn_num(hdev, type) == conn_num)
3149 break;
3150 }
3151
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003152 rcu_read_unlock();
3153
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003154 if (!chan)
3155 return NULL;
3156
3157 switch (chan->conn->type) {
3158 case ACL_LINK:
3159 cnt = hdev->acl_cnt;
3160 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003161 case AMP_LINK:
3162 cnt = hdev->block_cnt;
3163 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003164 case SCO_LINK:
3165 case ESCO_LINK:
3166 cnt = hdev->sco_cnt;
3167 break;
3168 case LE_LINK:
3169 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3170 break;
3171 default:
3172 cnt = 0;
3173 BT_ERR("Unknown link type");
3174 }
3175
3176 q = cnt / num;
3177 *quote = q ? q : 1;
3178 BT_DBG("chan %p quote %d", chan, *quote);
3179 return chan;
3180}
3181
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003182static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3183{
3184 struct hci_conn_hash *h = &hdev->conn_hash;
3185 struct hci_conn *conn;
3186 int num = 0;
3187
3188 BT_DBG("%s", hdev->name);
3189
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003190 rcu_read_lock();
3191
3192 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003193 struct hci_chan *chan;
3194
3195 if (conn->type != type)
3196 continue;
3197
3198 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3199 continue;
3200
3201 num++;
3202
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003203 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003204 struct sk_buff *skb;
3205
3206 if (chan->sent) {
3207 chan->sent = 0;
3208 continue;
3209 }
3210
3211 if (skb_queue_empty(&chan->data_q))
3212 continue;
3213
3214 skb = skb_peek(&chan->data_q);
3215 if (skb->priority >= HCI_PRIO_MAX - 1)
3216 continue;
3217
3218 skb->priority = HCI_PRIO_MAX - 1;
3219
3220 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003221 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003222 }
3223
3224 if (hci_conn_num(hdev, type) == num)
3225 break;
3226 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003227
3228 rcu_read_unlock();
3229
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003230}
3231
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003232static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3233{
3234 /* Calculate count of blocks used by this packet */
3235 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3236}
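
/* Worked example (assumed values): with block_len 16 and skb->len 24,
 * i.e. a 4-byte ACL header plus a 20-byte payload, DIV_ROUND_UP(20, 16)
 * charges 2 blocks against hdev->block_cnt.
 */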
3237
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003238static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 if (!test_bit(HCI_RAW, &hdev->flags)) {
3241 /* ACL tx timeout must be longer than maximum
3242 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003243 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003244 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003245 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003247}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003249static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003250{
3251 unsigned int cnt = hdev->acl_cnt;
3252 struct hci_chan *chan;
3253 struct sk_buff *skb;
3254 int quote;
3255
3256 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003257
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003258 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003259 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003260 u32 priority = (skb_peek(&chan->data_q))->priority;
3261 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003262 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003263 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003264
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003265 /* Stop if priority has changed */
3266 if (skb->priority < priority)
3267 break;
3268
3269 skb = skb_dequeue(&chan->data_q);
3270
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003271 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003272 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003273
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003274 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275 hdev->acl_last_tx = jiffies;
3276
3277 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003278 chan->sent++;
3279 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 }
3281 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003282
3283 if (cnt != hdev->acl_cnt)
3284 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285}
3286
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003287static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003288{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003289 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003290 struct hci_chan *chan;
3291 struct sk_buff *skb;
3292 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003293 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003294
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003295 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003296
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003297 BT_DBG("%s", hdev->name);
3298
3299 if (hdev->dev_type == HCI_AMP)
3300 type = AMP_LINK;
3301 else
3302 type = ACL_LINK;
3303
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003304 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003305 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003306 u32 priority = (skb_peek(&chan->data_q))->priority;
3307 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3308 int blocks;
3309
3310 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003311 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003312
3313 /* Stop if priority has changed */
3314 if (skb->priority < priority)
3315 break;
3316
3317 skb = skb_dequeue(&chan->data_q);
3318
3319 blocks = __get_blocks(hdev, skb);
3320 if (blocks > hdev->block_cnt)
3321 return;
3322
3323 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003324 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003325
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003326 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003327 hdev->acl_last_tx = jiffies;
3328
3329 hdev->block_cnt -= blocks;
3330 quote -= blocks;
3331
3332 chan->sent += blocks;
3333 chan->conn->sent += blocks;
3334 }
3335 }
3336
3337 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003338 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003339}
3340
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003341static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003342{
3343 BT_DBG("%s", hdev->name);
3344
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003345 /* No ACL link over BR/EDR controller */
3346 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3347 return;
3348
3349 /* No AMP link over AMP controller */
3350 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003351 return;
3352
3353 switch (hdev->flow_ctl_mode) {
3354 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3355 hci_sched_acl_pkt(hdev);
3356 break;
3357
3358 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3359 hci_sched_acl_blk(hdev);
3360 break;
3361 }
3362}
3363
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003365static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366{
3367 struct hci_conn *conn;
3368 struct sk_buff *skb;
3369 int quote;
3370
3371 BT_DBG("%s", hdev->name);
3372
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003373 if (!hci_conn_num(hdev, SCO_LINK))
3374 return;
3375
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3377 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3378 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003379 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380
3381 conn->sent++;
3382 if (conn->sent == ~0)
3383 conn->sent = 0;
3384 }
3385 }
3386}
3387
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003388static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003389{
3390 struct hci_conn *conn;
3391 struct sk_buff *skb;
3392 int quote;
3393
3394 BT_DBG("%s", hdev->name);
3395
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003396 if (!hci_conn_num(hdev, ESCO_LINK))
3397 return;
3398
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003399 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3400 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003401 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3402 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003403 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003404
3405 conn->sent++;
3406 if (conn->sent == ~0)
3407 conn->sent = 0;
3408 }
3409 }
3410}
3411
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003412static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003413{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003414 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003415 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003416 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003417
3418 BT_DBG("%s", hdev->name);
3419
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003420 if (!hci_conn_num(hdev, LE_LINK))
3421 return;
3422
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003423 if (!test_bit(HCI_RAW, &hdev->flags)) {
3424 /* LE tx timeout must be longer than maximum
3425 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003426 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003427 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003428 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003429 }
3430
3431 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003432 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003433 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003434 u32 priority = (skb_peek(&chan->data_q))->priority;
3435 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003436 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003437 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003438
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003439 /* Stop if priority has changed */
3440 if (skb->priority < priority)
3441 break;
3442
3443 skb = skb_dequeue(&chan->data_q);
3444
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003445 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003446 hdev->le_last_tx = jiffies;
3447
3448 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003449 chan->sent++;
3450 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003451 }
3452 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003453
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003454 if (hdev->le_pkts)
3455 hdev->le_cnt = cnt;
3456 else
3457 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003458
3459 if (cnt != tmp)
3460 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003461}
3462
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003463static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003465 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 struct sk_buff *skb;
3467
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003468 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003469 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470
Marcel Holtmann52de5992013-09-03 18:08:38 -07003471 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3472 /* Schedule queues and send stuff to HCI driver */
3473 hci_sched_acl(hdev);
3474 hci_sched_sco(hdev);
3475 hci_sched_esco(hdev);
3476 hci_sched_le(hdev);
3477 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003478
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 /* Send next queued raw (unknown type) packet */
3480 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003481 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482}
3483
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003484/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485
3486/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003487static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488{
3489 struct hci_acl_hdr *hdr = (void *) skb->data;
3490 struct hci_conn *conn;
3491 __u16 handle, flags;
3492
3493 skb_pull(skb, HCI_ACL_HDR_SIZE);
3494
3495 handle = __le16_to_cpu(hdr->handle);
3496 flags = hci_flags(handle);
3497 handle = hci_handle(handle);
3498
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003499 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003500 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501
3502 hdev->stat.acl_rx++;
3503
3504 hci_dev_lock(hdev);
3505 conn = hci_conn_hash_lookup_handle(hdev, handle);
3506 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003507
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003509 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003510
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003512 l2cap_recv_acldata(conn, skb, flags);
3513 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003515 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003516 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 }
3518
3519 kfree_skb(skb);
3520}
3521
3522/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003523static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524{
3525 struct hci_sco_hdr *hdr = (void *) skb->data;
3526 struct hci_conn *conn;
3527 __u16 handle;
3528
3529 skb_pull(skb, HCI_SCO_HDR_SIZE);
3530
3531 handle = __le16_to_cpu(hdr->handle);
3532
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003533 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534
3535 hdev->stat.sco_rx++;
3536
3537 hci_dev_lock(hdev);
3538 conn = hci_conn_hash_lookup_handle(hdev, handle);
3539 hci_dev_unlock(hdev);
3540
3541 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003543 sco_recv_scodata(conn, skb);
3544 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003546 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003547 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 }
3549
3550 kfree_skb(skb);
3551}
3552
Johan Hedberg9238f362013-03-05 20:37:48 +02003553static bool hci_req_is_complete(struct hci_dev *hdev)
3554{
3555 struct sk_buff *skb;
3556
3557 skb = skb_peek(&hdev->cmd_q);
3558 if (!skb)
3559 return true;
3560
3561 return bt_cb(skb)->req.start;
3562}
3563
Johan Hedberg42c6b122013-03-05 20:37:49 +02003564static void hci_resend_last(struct hci_dev *hdev)
3565{
3566 struct hci_command_hdr *sent;
3567 struct sk_buff *skb;
3568 u16 opcode;
3569
3570 if (!hdev->sent_cmd)
3571 return;
3572
3573 sent = (void *) hdev->sent_cmd->data;
3574 opcode = __le16_to_cpu(sent->opcode);
3575 if (opcode == HCI_OP_RESET)
3576 return;
3577
3578 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3579 if (!skb)
3580 return;
3581
3582 skb_queue_head(&hdev->cmd_q, skb);
3583 queue_work(hdev->workqueue, &hdev->cmd_work);
3584}
3585
Johan Hedberg9238f362013-03-05 20:37:48 +02003586void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3587{
3588 hci_req_complete_t req_complete = NULL;
3589 struct sk_buff *skb;
3590 unsigned long flags;
3591
3592 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3593
Johan Hedberg42c6b122013-03-05 20:37:49 +02003594 /* If the completed command doesn't match the last one that was
3595 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003596 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003597 if (!hci_sent_cmd_data(hdev, opcode)) {
3598 /* Some CSR based controllers generate a spontaneous
3599 * reset complete event during init and any pending
3600 * command will never be completed. In such a case we
3601 * need to resend whatever was the last sent
3602 * command.
3603 */
3604 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3605 hci_resend_last(hdev);
3606
Johan Hedberg9238f362013-03-05 20:37:48 +02003607 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003608 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003609
 3610 /* If the command succeeded and there are still more commands in
3611 * this request the request is not yet complete.
3612 */
3613 if (!status && !hci_req_is_complete(hdev))
3614 return;
3615
3616 /* If this was the last command in a request the complete
3617 * callback would be found in hdev->sent_cmd instead of the
3618 * command queue (hdev->cmd_q).
3619 */
3620 if (hdev->sent_cmd) {
3621 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003622
3623 if (req_complete) {
3624 /* We must set the complete callback to NULL to
3625 * avoid calling the callback more than once if
3626 * this function gets called again.
3627 */
3628 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3629
Johan Hedberg9238f362013-03-05 20:37:48 +02003630 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003631 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003632 }
3633
3634 /* Remove all pending commands belonging to this request */
3635 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3636 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3637 if (bt_cb(skb)->req.start) {
3638 __skb_queue_head(&hdev->cmd_q, skb);
3639 break;
3640 }
3641
3642 req_complete = bt_cb(skb)->req.complete;
3643 kfree_skb(skb);
3644 }
3645 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3646
3647call_complete:
3648 if (req_complete)
3649 req_complete(hdev, status);
3650}
3651
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003652static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003654 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655 struct sk_buff *skb;
3656
3657 BT_DBG("%s", hdev->name);
3658
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003660 /* Send copy to monitor */
3661 hci_send_to_monitor(hdev, skb);
3662
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 if (atomic_read(&hdev->promisc)) {
3664 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003665 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 }
3667
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003668 if (test_bit(HCI_RAW, &hdev->flags) ||
3669 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 kfree_skb(skb);
3671 continue;
3672 }
3673
3674 if (test_bit(HCI_INIT, &hdev->flags)) {
 3675 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003676 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 case HCI_ACLDATA_PKT:
3678 case HCI_SCODATA_PKT:
3679 kfree_skb(skb);
3680 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003681 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 }
3683
3684 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003685 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003687 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 hci_event_packet(hdev, skb);
3689 break;
3690
3691 case HCI_ACLDATA_PKT:
3692 BT_DBG("%s ACL data packet", hdev->name);
3693 hci_acldata_packet(hdev, skb);
3694 break;
3695
3696 case HCI_SCODATA_PKT:
3697 BT_DBG("%s SCO data packet", hdev->name);
3698 hci_scodata_packet(hdev, skb);
3699 break;
3700
3701 default:
3702 kfree_skb(skb);
3703 break;
3704 }
3705 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706}
3707
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003708static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003710 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711 struct sk_buff *skb;
3712
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003713 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3714 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003717 if (atomic_read(&hdev->cmd_cnt)) {
3718 skb = skb_dequeue(&hdev->cmd_q);
3719 if (!skb)
3720 return;
3721
Wei Yongjun7585b972009-02-25 18:29:52 +08003722 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003724 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003725 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003727 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003728 if (test_bit(HCI_RESET, &hdev->flags))
3729 del_timer(&hdev->cmd_timer);
3730 else
3731 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003732 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 } else {
3734 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003735 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736 }
3737 }
3738}