/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

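/* Dump the inquiry cache to debugfs, one discovered device per line:
 * address, page scan parameters, class of device, clock offset, RSSI,
 * SSP mode and the entry timestamp.
 */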
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

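/* Simple u64 get/set pair backing the auto_accept_delay debugfs
 * attribute; the device lock guards against concurrent readers and
 * writers.
 */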
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

/* ---- HCI requests ---- */

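/* Synchronous requests park the caller on hdev->req_wait_q and track
 * progress in hdev->req_status. The two helpers below record the result
 * (or the cancel reason) and wake the waiting task.
 */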
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

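/* Send a single HCI command and sleep until the controller answers with
 * the given event, or with a Command Complete when event is 0. Returns
 * the event skb on success or an ERR_PTR on failure or timeout.
 */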
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

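/* Stage one of controller init: an optional reset followed by the basic
 * identity reads, chosen per device type (BR/EDR/LE or AMP).
 */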
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

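/* Pick the best inquiry mode: 0x02 for inquiry with extended results,
 * 0x01 for inquiry with RSSI, 0x00 for standard inquiry. The explicit
 * manufacturer/revision checks cover controllers that handle RSSI
 * inquiry without advertising it in their feature bits.
 */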
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

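/* Build the page 1 event mask: start from a conservative default, then
 * enable the events that match the features this controller reports.
 * Each events[n] byte holds eight event bits as laid out in the
 * Bluetooth core specification.
 */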
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

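/* Run the staged controller initialization. Stage one applies to every
 * controller; stages two through four are for BR/EDR/LE controllers
 * only and build on the features and commands discovered earlier.
 */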
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The HCI_BREDR device type covers single-mode LE, single-mode
	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
	 * only need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

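/* Discovery is considered active while devices are being found or
 * remote names are being resolved.
 */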
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

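/* Re-insert the entry into the resolve list, keeping it sorted by
 * ascending |RSSI| so that stronger (likely closer) devices get their
 * names resolved first. Entries whose resolution is already pending
 * keep their place at the front.
 */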
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

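/* Add or refresh an inquiry cache entry. Returns false if the entry's
 * remote name is still unknown (or on allocation failure), signalling
 * that name resolution may still be needed; true otherwise.
 */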
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

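/* Copy at most num cached inquiry results into buf as an array of
 * struct inquiry_info, as consumed by the HCIINQUIRY ioctl. Returns
 * the number of entries copied.
 */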
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

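/* Power on a controller: verify it may be brought up (not being
 * unregistered, not rfkilled, has a usable address), call the driver's
 * open callback and run the HCI init sequence. Takes the request lock
 * for the duration of the open.
 */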
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

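/* Power off a controller: flush the work queues, tear down discovery
 * state and connections, optionally issue a final HCI Reset and hand
 * the hardware back to the driver via its close callback.
 */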
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001432
Johan Hedberge59fda82012-02-22 18:11:53 +02001433 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001434 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001435
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 hci_req_unlock(hdev);
1437
1438 hci_dev_put(hdev);
1439 return 0;
1440}
1441
1442int hci_dev_close(__u16 dev)
1443{
1444 struct hci_dev *hdev;
1445 int err;
1446
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001447 hdev = hci_dev_get(dev);
1448 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001450
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001451 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1452 err = -EBUSY;
1453 goto done;
1454 }
1455
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001456 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1457 cancel_delayed_work(&hdev->power_off);
1458
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001460
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001461done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 hci_dev_put(hdev);
1463 return err;
1464}
1465
1466int hci_dev_reset(__u16 dev)
1467{
1468 struct hci_dev *hdev;
1469 int ret = 0;
1470
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001471 hdev = hci_dev_get(dev);
1472 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 return -ENODEV;
1474
1475 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
Marcel Holtmann808a0492013-08-26 20:57:58 -07001477 if (!test_bit(HCI_UP, &hdev->flags)) {
1478 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001480 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001482 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1483 ret = -EBUSY;
1484 goto done;
1485 }
1486
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 /* Drop queues */
1488 skb_queue_purge(&hdev->rx_q);
1489 skb_queue_purge(&hdev->cmd_q);
1490
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001491 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001492 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001494 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495
1496 if (hdev->flush)
1497 hdev->flush(hdev);
1498
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001499 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001500 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
1502 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001503 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504
1505done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 hci_req_unlock(hdev);
1507 hci_dev_put(hdev);
1508 return ret;
1509}
1510
1511int hci_dev_reset_stat(__u16 dev)
1512{
1513 struct hci_dev *hdev;
1514 int ret = 0;
1515
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001516 hdev = hci_dev_get(dev);
1517 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 return -ENODEV;
1519
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001520 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1521 ret = -EBUSY;
1522 goto done;
1523 }
1524
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1526
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001527done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 return ret;
1530}
1531
1532int hci_dev_cmd(unsigned int cmd, void __user *arg)
1533{
1534 struct hci_dev *hdev;
1535 struct hci_dev_req dr;
1536 int err = 0;
1537
1538 if (copy_from_user(&dr, arg, sizeof(dr)))
1539 return -EFAULT;
1540
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001541 hdev = hci_dev_get(dr.dev_id);
1542 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 return -ENODEV;
1544
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001545 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1546 err = -EBUSY;
1547 goto done;
1548 }
1549
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001550 if (hdev->dev_type != HCI_BREDR) {
1551 err = -EOPNOTSUPP;
1552 goto done;
1553 }
1554
Johan Hedberg56f87902013-10-02 13:43:13 +03001555 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1556 err = -EOPNOTSUPP;
1557 goto done;
1558 }
1559
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 switch (cmd) {
1561 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001562 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1563 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 break;
1565
1566 case HCISETENCRYPT:
1567 if (!lmp_encrypt_capable(hdev)) {
1568 err = -EOPNOTSUPP;
1569 break;
1570 }
1571
1572 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1573 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001574 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1575 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 if (err)
1577 break;
1578 }
1579
Johan Hedberg01178cd2013-03-05 20:37:41 +02001580 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1581 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 break;
1583
1584 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001585 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1586 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 break;
1588
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001589 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001590 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1591 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001592 break;
1593
1594 case HCISETLINKMODE:
1595 hdev->link_mode = ((__u16) dr.dev_opt) &
1596 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1597 break;
1598
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 case HCISETPTYPE:
1600 hdev->pkt_type = (__u16) dr.dev_opt;
1601 break;
1602
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001604 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1605 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 break;
1607
1608 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001609 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1610 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 break;
1612
1613 default:
1614 err = -EINVAL;
1615 break;
1616 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001617
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001618done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 hci_dev_put(hdev);
1620 return err;
1621}
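
/* Illustrative user-space sketch (not part of this file): the HCISET*
 * ioctls above take a struct hci_dev_req whose dev_opt carries the
 * argument. For HCISETACLMTU/HCISETSCOMTU the two __u16 loads above
 * read the packet count from the low half of dev_opt and the MTU from
 * the high half (on little-endian hosts). Hypothetical example values:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,			// hci0
 *		.dev_opt = (1021 << 16) | 8,	// ACL MTU 1021, 8 packets
 *	};
 *	if (ioctl(ctl, HCISETACLMTU, (unsigned long) &dr) < 0)
 *		perror("HCISETACLMTU");
 */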

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
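
/* Illustrative user-space sketch (not part of this file): callers of
 * HCIGETDEVLIST allocate the flexible-array request themselves and set
 * dev_num before the call, mirroring the get_user() of dev_num above.
 * Minimal example, assuming the usual BlueZ headers:
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		       HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dl->dev_req[i].dev_id, dl->dev_req[i].dev_opt);
 *	free(dl);
 */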

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
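
/* Illustrative user-space sketch (not part of this file): di.type packs
 * the transport bus in the low nibble and the device type (HCI_BREDR or
 * HCI_AMP) in the next two bits, so callers decode it as:
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *	if (ioctl(ctl, HCIGETDEVINFO, (unsigned long) &di) == 0)
 *		printf("%s bus %u type %u\n",
 *		       di.name, di.type & 0x0f, (di.type >> 4) & 0x03);
 */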

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

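/* Decide whether a newly generated link key should be stored. Summary of
 * the checks below: legacy (pre-SSP) keys are always kept, debug keys are
 * never kept, a changed-combination key without a prior key is dropped,
 * and otherwise the key is kept whenever at least one side required
 * bonding.
 */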
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
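
/* Illustrative driver-side sketch (not part of this file): a minimal
 * transport driver allocates an hdev, fills in the mandatory open/close
 * callbacks checked above (plus send/flush), and registers it. The my_*
 * names are hypothetical:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */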

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
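
/* Illustrative driver-side sketch (not part of this file): a driver that
 * already has a complete frame hands it to the core by tagging the skb
 * with its packet type and calling hci_recv_frame(). Hypothetical RX
 * path, assuming len bytes of event payload in buf:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	return hci_recv_frame(hdev, skb);
 */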

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
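
/* Illustrative driver-side sketch (not part of this file): byte-stream
 * transports such as UARTs feed raw data into the reassembly machinery
 * above, where the leading packet-type byte of each frame selects the
 * header parsing. Hypothetical receive callback:
 *
 *	static void my_uart_rx(struct hci_dev *hdev, const u8 *buf, int len)
 *	{
 *		// a negative return means the stream is unrecoverable
 *		if (hci_recv_stream_fragment(hdev, (void *) buf, len) < 0)
 *			BT_ERR("%s stream reassembly failed", hdev->name);
 *	}
 */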
2677
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678/* ---- Interface to upper protocols ---- */
2679
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680int hci_register_cb(struct hci_cb *cb)
2681{
2682 BT_DBG("%p name %s", cb, cb->name);
2683
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002684 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002686 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687
2688 return 0;
2689}
2690EXPORT_SYMBOL(hci_register_cb);
2691
2692int hci_unregister_cb(struct hci_cb *cb)
2693{
2694 BT_DBG("%p name %s", cb, cb->name);
2695
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002696 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002698 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700 return 0;
2701}
2702EXPORT_SYMBOL(hci_unregister_cb);
2703
Marcel Holtmann51086992013-10-10 14:54:19 -07002704static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002706 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002708 /* Time stamp */
2709 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002711 /* Send copy to monitor */
2712 hci_send_to_monitor(hdev, skb);
2713
2714 if (atomic_read(&hdev->promisc)) {
2715 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002716 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717 }
2718
2719 /* Get rid of skb owner, prior to sending to the driver. */
2720 skb_orphan(skb);
2721
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07002722 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07002723 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724}
2725
Johan Hedberg3119ae92013-03-05 20:37:44 +02002726void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2727{
2728 skb_queue_head_init(&req->cmd_q);
2729 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002730 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002731}
2732
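/* Run a request built with hci_req_init()/hci_req_add(): the queued
 * commands are spliced onto hdev->cmd_q and processed asynchronously
 * by the command work. A minimal usage sketch (reset_complete is a
 * hypothetical callback of type hci_req_complete_t):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	if (hci_req_run(&req, reset_complete) < 0)
 *		BT_ERR("request failed to start");
 */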
2733int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2734{
2735 struct hci_dev *hdev = req->hdev;
2736 struct sk_buff *skb;
2737 unsigned long flags;
2738
2739 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2740
Andre Guedes5d73e032013-03-08 11:20:16 -03002741 /* If an error occurred during request building, remove all HCI
2742 * commands queued on the HCI request queue.
2743 */
2744 if (req->err) {
2745 skb_queue_purge(&req->cmd_q);
2746 return req->err;
2747 }
2748
Johan Hedberg3119ae92013-03-05 20:37:44 +02002749 /* Do not allow empty requests */
2750 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002751 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002752
2753 skb = skb_peek_tail(&req->cmd_q);
2754 bt_cb(skb)->req.complete = complete;
2755
2756 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2757 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2758 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2759
2760 queue_work(hdev->workqueue, &hdev->cmd_work);
2761
2762 return 0;
2763}
2764
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002765static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002766 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767{
2768 int len = HCI_COMMAND_HDR_SIZE + plen;
2769 struct hci_command_hdr *hdr;
2770 struct sk_buff *skb;
2771
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002773 if (!skb)
2774 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775
2776 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002777 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 hdr->plen = plen;
2779
2780 if (plen)
2781 memcpy(skb_put(skb, plen), param, plen);
2782
2783 BT_DBG("skb len %d", skb->len);
2784
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002785 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002786
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002787 return skb;
2788}
2789
2790/* Send HCI command */
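/* Fire-and-forget variant for a single command outside of a request.
 * Sketch (HCI_OP_RESET takes no parameters):
 *
 *	if (hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL) < 0)
 *		BT_ERR("reset could not be queued");
 */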
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002791int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2792 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002793{
2794 struct sk_buff *skb;
2795
2796 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2797
2798 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2799 if (!skb) {
2800 BT_ERR("%s no memory for command", hdev->name);
2801 return -ENOMEM;
2802 }
2803
Johan Hedberg11714b32013-03-05 20:37:47 +02002804 /* Stand-alone HCI commands must be flagged as
2805 * single-command requests.
2806 */
2807 bt_cb(skb)->req.start = true;
2808
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002810 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811
2812 return 0;
2813}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814
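/* Unlike hci_send_cmd(), the two helpers below only append to the
 * request's private queue; nothing reaches the controller until
 * hci_req_run() is called. The event parameter of hci_req_add_ev()
 * marks commands that complete with a specific event instead of
 * Command Complete/Status.
 */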
Johan Hedberg71c76a12013-03-05 20:37:46 +02002815/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002816void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2817 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002818{
2819 struct hci_dev *hdev = req->hdev;
2820 struct sk_buff *skb;
2821
2822 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2823
Andre Guedes34739c12013-03-08 11:20:18 -03002824 /* If an error occurred during request building, there is no point in
2825 * queueing the HCI command. We can simply return.
2826 */
2827 if (req->err)
2828 return;
2829
Johan Hedberg71c76a12013-03-05 20:37:46 +02002830 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2831 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002832 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2833 hdev->name, opcode);
2834 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002835 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002836 }
2837
2838 if (skb_queue_empty(&req->cmd_q))
2839 bt_cb(skb)->req.start = true;
2840
Johan Hedberg02350a72013-04-03 21:50:29 +03002841 bt_cb(skb)->req.event = event;
2842
Johan Hedberg71c76a12013-03-05 20:37:46 +02002843 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002844}
2845
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002846void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2847 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002848{
2849 hci_req_add_ev(req, opcode, plen, param, 0);
2850}
2851
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002853void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854{
2855 struct hci_command_hdr *hdr;
2856
2857 if (!hdev->sent_cmd)
2858 return NULL;
2859
2860 hdr = (void *) hdev->sent_cmd->data;
2861
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002862 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 return NULL;
2864
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002865 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866
2867 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2868}
2869
2870/* Send ACL data */
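/* The 16-bit ACL handle field packs the 12-bit connection handle and
 * the 4-bit packet boundary/broadcast flags (via hci_handle_pack());
 * the header is pushed in front of the payload below.
 */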
2871static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2872{
2873 struct hci_acl_hdr *hdr;
2874 int len = skb->len;
2875
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002876 skb_push(skb, HCI_ACL_HDR_SIZE);
2877 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002878 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002879 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2880 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881}
2882
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002883static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002884 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002886 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 struct hci_dev *hdev = conn->hdev;
2888 struct sk_buff *list;
2889
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002890 skb->len = skb_headlen(skb);
2891 skb->data_len = 0;
2892
2893 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002894
2895 switch (hdev->dev_type) {
2896 case HCI_BREDR:
2897 hci_add_acl_hdr(skb, conn->handle, flags);
2898 break;
2899 case HCI_AMP:
2900 hci_add_acl_hdr(skb, chan->handle, flags);
2901 break;
2902 default:
2903 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2904 return;
2905 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002906
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002907 list = skb_shinfo(skb)->frag_list;
2908 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 /* Non-fragmented */
2910 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2911
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002912 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 } else {
2914 /* Fragmented */
2915 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2916
2917 skb_shinfo(skb)->frag_list = NULL;
2918
2919 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002920 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002922 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002923
2924 flags &= ~ACL_START;
2925 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 do {
2927 skb = list;
 list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002928
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002929 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002930 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
2932 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2933
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002934 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 } while (list);
2936
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002937 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002939}
2940
2941void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2942{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002943 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002944
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002945 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002946
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002947 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002949 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951
2952/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002953void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954{
2955 struct hci_dev *hdev = conn->hdev;
2956 struct hci_sco_hdr hdr;
2957
2958 BT_DBG("%s len %d", hdev->name, skb->len);
2959
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002960 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 hdr.dlen = skb->len;
2962
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002963 skb_push(skb, HCI_SCO_HDR_SIZE);
2964 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002965 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002967 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002968
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002970 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972
2973/* ---- HCI TX task (outgoing data) ---- */
2974
2975/* HCI Connection scheduler */
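/* Pick the connection of the given type that has the fewest packets
 * in flight (c->sent) and still has data queued, then derive its fair
 * quota from the controller's free buffer count for that link type.
 */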
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002976static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2977 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978{
2979 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002980 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002981 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002983 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002985
2986 rcu_read_lock();
2987
2988 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002989 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002991
2992 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2993 continue;
2994
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995 num++;
2996
2997 if (c->sent < min) {
2998 min = c->sent;
2999 conn = c;
3000 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003001
3002 if (hci_conn_num(hdev, type) == num)
3003 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 }
3005
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003006 rcu_read_unlock();
3007
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003009 int cnt, q;
3010
3011 switch (conn->type) {
3012 case ACL_LINK:
3013 cnt = hdev->acl_cnt;
3014 break;
3015 case SCO_LINK:
3016 case ESCO_LINK:
3017 cnt = hdev->sco_cnt;
3018 break;
3019 case LE_LINK:
3020 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3021 break;
3022 default:
3023 cnt = 0;
3024 BT_ERR("Unknown link type");
3025 }
3026
3027 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 *quote = q ? q : 1;
3029 } else
3030 *quote = 0;
3031
3032 BT_DBG("conn %p quote %d", conn, *quote);
3033 return conn;
3034}
3035
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003036static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037{
3038 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003039 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040
Ville Tervobae1f5d92011-02-10 22:38:53 -03003041 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003043 rcu_read_lock();
3044
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003046 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003047 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003048 BT_ERR("%s killing stalled connection %pMR",
3049 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003050 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 }
3052 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003053
3054 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055}
3056
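/* Channel-aware variant of the scheduler above: among channels whose
 * head skb has the highest priority, pick the one on the connection
 * with the fewest packets in flight, and size the quota from the
 * per-link-type buffer count.
 */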
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003057static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3058 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003059{
3060 struct hci_conn_hash *h = &hdev->conn_hash;
3061 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003062 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003063 struct hci_conn *conn;
3064 int cnt, q, conn_num = 0;
3065
3066 BT_DBG("%s", hdev->name);
3067
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003068 rcu_read_lock();
3069
3070 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003071 struct hci_chan *tmp;
3072
3073 if (conn->type != type)
3074 continue;
3075
3076 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3077 continue;
3078
3079 conn_num++;
3080
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003081 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003082 struct sk_buff *skb;
3083
3084 if (skb_queue_empty(&tmp->data_q))
3085 continue;
3086
3087 skb = skb_peek(&tmp->data_q);
3088 if (skb->priority < cur_prio)
3089 continue;
3090
3091 if (skb->priority > cur_prio) {
3092 num = 0;
3093 min = ~0;
3094 cur_prio = skb->priority;
3095 }
3096
3097 num++;
3098
3099 if (conn->sent < min) {
3100 min = conn->sent;
3101 chan = tmp;
3102 }
3103 }
3104
3105 if (hci_conn_num(hdev, type) == conn_num)
3106 break;
3107 }
3108
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003109 rcu_read_unlock();
3110
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003111 if (!chan)
3112 return NULL;
3113
3114 switch (chan->conn->type) {
3115 case ACL_LINK:
3116 cnt = hdev->acl_cnt;
3117 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003118 case AMP_LINK:
3119 cnt = hdev->block_cnt;
3120 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003121 case SCO_LINK:
3122 case ESCO_LINK:
3123 cnt = hdev->sco_cnt;
3124 break;
3125 case LE_LINK:
3126 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3127 break;
3128 default:
3129 cnt = 0;
3130 BT_ERR("Unknown link type");
3131 }
3132
3133 q = cnt / num;
3134 *quote = q ? q : 1;
3135 BT_DBG("chan %p quote %d", chan, *quote);
3136 return chan;
3137}
3138
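/* Anti-starvation pass run after a TX round: channels that got
 * service have their counters reset, while queued-but-unserved
 * channels get their head skb promoted to HCI_PRIO_MAX - 1 so they
 * win the next round.
 */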
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003139static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3140{
3141 struct hci_conn_hash *h = &hdev->conn_hash;
3142 struct hci_conn *conn;
3143 int num = 0;
3144
3145 BT_DBG("%s", hdev->name);
3146
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003147 rcu_read_lock();
3148
3149 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003150 struct hci_chan *chan;
3151
3152 if (conn->type != type)
3153 continue;
3154
3155 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3156 continue;
3157
3158 num++;
3159
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003160 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003161 struct sk_buff *skb;
3162
3163 if (chan->sent) {
3164 chan->sent = 0;
3165 continue;
3166 }
3167
3168 if (skb_queue_empty(&chan->data_q))
3169 continue;
3170
3171 skb = skb_peek(&chan->data_q);
3172 if (skb->priority >= HCI_PRIO_MAX - 1)
3173 continue;
3174
3175 skb->priority = HCI_PRIO_MAX - 1;
3176
3177 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003178 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003179 }
3180
3181 if (hci_conn_num(hdev, type) == num)
3182 break;
3183 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003184
3185 rcu_read_unlock();
3186
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003187}
3188
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003189static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3190{
3191 /* Calculate count of blocks used by this packet */
3192 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3193}
3194
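/* Stalled-link watchdog: if the controller has returned no ACL
 * credits (cnt == 0) for longer than HCI_ACL_TX_TIMEOUT, assume the
 * link is dead and let hci_link_tx_to() kill the stalled connections.
 */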
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003195static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 if (!test_bit(HCI_RAW, &hdev->flags)) {
3198 /* ACL tx timeout must be longer than maximum
3199 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003200 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003201 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003202 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003204}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003206static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003207{
3208 unsigned int cnt = hdev->acl_cnt;
3209 struct hci_chan *chan;
3210 struct sk_buff *skb;
3211 int quote;
3212
3213 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003214
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003215 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003216 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003217 u32 priority = (skb_peek(&chan->data_q))->priority;
3218 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003219 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003220 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003221
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003222 /* Stop if priority has changed */
3223 if (skb->priority < priority)
3224 break;
3225
3226 skb = skb_dequeue(&chan->data_q);
3227
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003228 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003229 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003230
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003231 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 hdev->acl_last_tx = jiffies;
3233
3234 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003235 chan->sent++;
3236 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237 }
3238 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003239
3240 if (cnt != hdev->acl_cnt)
3241 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242}
3243
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003244static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003245{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003246 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003247 struct hci_chan *chan;
3248 struct sk_buff *skb;
3249 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003250 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003251
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003252 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003253
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003254 BT_DBG("%s", hdev->name);
3255
3256 if (hdev->dev_type == HCI_AMP)
3257 type = AMP_LINK;
3258 else
3259 type = ACL_LINK;
3260
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003261 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003262 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003263 u32 priority = (skb_peek(&chan->data_q))->priority;
3264 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3265 int blocks;
3266
3267 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003268 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003269
3270 /* Stop if priority has changed */
3271 if (skb->priority < priority)
3272 break;
3273
3274 skb = skb_dequeue(&chan->data_q);
3275
3276 blocks = __get_blocks(hdev, skb);
3277 if (blocks > hdev->block_cnt)
3278 return;
3279
3280 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003281 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003282
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003283 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003284 hdev->acl_last_tx = jiffies;
3285
3286 hdev->block_cnt -= blocks;
3287 quote -= blocks;
3288
3289 chan->sent += blocks;
3290 chan->conn->sent += blocks;
3291 }
3292 }
3293
3294 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003295 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003296}
3297
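/* ACL scheduling entry point: dispatch to packet-based or block-based
 * flow control depending on hdev->flow_ctl_mode (BR/EDR controllers
 * normally use the former, AMP controllers the latter).
 */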
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003298static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003299{
3300 BT_DBG("%s", hdev->name);
3301
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003302 /* No ACL link over BR/EDR controller */
3303 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3304 return;
3305
3306 /* No AMP link over AMP controller */
3307 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003308 return;
3309
3310 switch (hdev->flow_ctl_mode) {
3311 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3312 hci_sched_acl_pkt(hdev);
3313 break;
3314
3315 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3316 hci_sched_acl_blk(hdev);
3317 break;
3318 }
3319}
3320
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003322static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323{
3324 struct hci_conn *conn;
3325 struct sk_buff *skb;
3326 int quote;
3327
3328 BT_DBG("%s", hdev->name);
3329
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003330 if (!hci_conn_num(hdev, SCO_LINK))
3331 return;
3332
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3334 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3335 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003336 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337
3338 conn->sent++;
3339 if (conn->sent == ~0)
3340 conn->sent = 0;
3341 }
3342 }
3343}
3344
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003345static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003346{
3347 struct hci_conn *conn;
3348 struct sk_buff *skb;
3349 int quote;
3350
3351 BT_DBG("%s", hdev->name);
3352
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003353 if (!hci_conn_num(hdev, ESCO_LINK))
3354 return;
3355
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003356 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3357 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003358 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3359 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003360 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003361
3362 conn->sent++;
3363 if (conn->sent == ~0)
3364 conn->sent = 0;
3365 }
3366 }
3367}
3368
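/* LE scheduler: controllers without a dedicated LE buffer pool report
 * le_pkts == 0 and share the ACL pool, hence the le_cnt/acl_cnt
 * fallback below.
 */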
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003369static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003370{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003371 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003372 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003373 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003374
3375 BT_DBG("%s", hdev->name);
3376
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003377 if (!hci_conn_num(hdev, LE_LINK))
3378 return;
3379
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003380 if (!test_bit(HCI_RAW, &hdev->flags)) {
3381 /* LE tx timeout must be longer than maximum
3382 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003383 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003384 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003385 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003386 }
3387
3388 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003389 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003390 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003391 u32 priority = (skb_peek(&chan->data_q))->priority;
3392 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003393 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003394 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003395
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003396 /* Stop if priority has changed */
3397 if (skb->priority < priority)
3398 break;
3399
3400 skb = skb_dequeue(&chan->data_q);
3401
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003402 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003403 hdev->le_last_tx = jiffies;
3404
3405 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003406 chan->sent++;
3407 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003408 }
3409 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003410
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003411 if (hdev->le_pkts)
3412 hdev->le_cnt = cnt;
3413 else
3414 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003415
3416 if (cnt != tmp)
3417 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003418}
3419
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003420static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003422 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423 struct sk_buff *skb;
3424
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003425 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003426 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427
Marcel Holtmann52de5992013-09-03 18:08:38 -07003428 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3429 /* Schedule queues and send stuff to HCI driver */
3430 hci_sched_acl(hdev);
3431 hci_sched_sco(hdev);
3432 hci_sched_esco(hdev);
3433 hci_sched_le(hdev);
3434 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003435
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 /* Send next queued raw (unknown type) packet */
3437 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003438 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439}
3440
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003441/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442
3443/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003444static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445{
3446 struct hci_acl_hdr *hdr = (void *) skb->data;
3447 struct hci_conn *conn;
3448 __u16 handle, flags;
3449
3450 skb_pull(skb, HCI_ACL_HDR_SIZE);
3451
3452 handle = __le16_to_cpu(hdr->handle);
3453 flags = hci_flags(handle);
3454 handle = hci_handle(handle);
3455
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003456 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003457 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458
3459 hdev->stat.acl_rx++;
3460
3461 hci_dev_lock(hdev);
3462 conn = hci_conn_hash_lookup_handle(hdev, handle);
3463 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003466 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003467
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003469 l2cap_recv_acldata(conn, skb, flags);
3470 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003472 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003473 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474 }
3475
3476 kfree_skb(skb);
3477}
3478
3479/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003480static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481{
3482 struct hci_sco_hdr *hdr = (void *) skb->data;
3483 struct hci_conn *conn;
3484 __u16 handle;
3485
3486 skb_pull(skb, HCI_SCO_HDR_SIZE);
3487
3488 handle = __le16_to_cpu(hdr->handle);
3489
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003490 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491
3492 hdev->stat.sco_rx++;
3493
3494 hci_dev_lock(hdev);
3495 conn = hci_conn_hash_lookup_handle(hdev, handle);
3496 hci_dev_unlock(hdev);
3497
3498 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003500 sco_recv_scodata(conn, skb);
3501 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003503 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003504 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 }
3506
3507 kfree_skb(skb);
3508}
3509
Johan Hedberg9238f362013-03-05 20:37:48 +02003510static bool hci_req_is_complete(struct hci_dev *hdev)
3511{
3512 struct sk_buff *skb;
3513
3514 skb = skb_peek(&hdev->cmd_q);
3515 if (!skb)
3516 return true;
3517
3518 return bt_cb(skb)->req.start;
3519}
3520
Johan Hedberg42c6b122013-03-05 20:37:49 +02003521static void hci_resend_last(struct hci_dev *hdev)
3522{
3523 struct hci_command_hdr *sent;
3524 struct sk_buff *skb;
3525 u16 opcode;
3526
3527 if (!hdev->sent_cmd)
3528 return;
3529
3530 sent = (void *) hdev->sent_cmd->data;
3531 opcode = __le16_to_cpu(sent->opcode);
3532 if (opcode == HCI_OP_RESET)
3533 return;
3534
3535 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3536 if (!skb)
3537 return;
3538
3539 skb_queue_head(&hdev->cmd_q, skb);
3540 queue_work(hdev->workqueue, &hdev->cmd_work);
3541}
3542
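/* Called from event processing when a command completes: decide
 * whether the surrounding request is finished and, if so, invoke its
 * completion callback exactly once while flushing any of the
 * request's commands that are still queued.
 */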
Johan Hedberg9238f362013-03-05 20:37:48 +02003543void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3544{
3545 hci_req_complete_t req_complete = NULL;
3546 struct sk_buff *skb;
3547 unsigned long flags;
3548
3549 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3550
Johan Hedberg42c6b122013-03-05 20:37:49 +02003551 /* If the completed command doesn't match the last one that was
3552 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003553 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003554 if (!hci_sent_cmd_data(hdev, opcode)) {
3555 /* Some CSR-based controllers generate a spontaneous
3556 * reset complete event during init and any pending
3557 * command will never be completed. In such a case we
3558 * need to resend whatever was the last sent
3559 * command.
3560 */
3561 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3562 hci_resend_last(hdev);
3563
Johan Hedberg9238f362013-03-05 20:37:48 +02003564 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003565 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003566
3567 /* If the command succeeded and there's still more commands in
3568 * this request the request is not yet complete.
3569 */
3570 if (!status && !hci_req_is_complete(hdev))
3571 return;
3572
3573 /* If this was the last command in a request the complete
3574 * callback would be found in hdev->sent_cmd instead of the
3575 * command queue (hdev->cmd_q).
3576 */
3577 if (hdev->sent_cmd) {
3578 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003579
3580 if (req_complete) {
3581 /* We must set the complete callback to NULL to
3582 * avoid calling the callback more than once if
3583 * this function gets called again.
3584 */
3585 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3586
Johan Hedberg9238f362013-03-05 20:37:48 +02003587 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003588 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003589 }
3590
3591 /* Remove all pending commands belonging to this request */
3592 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3593 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3594 if (bt_cb(skb)->req.start) {
3595 __skb_queue_head(&hdev->cmd_q, skb);
3596 break;
3597 }
3598
3599 req_complete = bt_cb(skb)->req.complete;
3600 kfree_skb(skb);
3601 }
3602 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3603
3604call_complete:
3605 if (req_complete)
3606 req_complete(hdev, status);
3607}
3608
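/* RX work: drain hdev->rx_q, mirror every frame to the monitor (and
 * to raw sockets in promiscuous mode), then dispatch on
 * bt_cb(skb)->pkt_type to the event, ACL and SCO handlers above.
 */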
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003609static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003611 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612 struct sk_buff *skb;
3613
3614 BT_DBG("%s", hdev->name);
3615
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003617 /* Send copy to monitor */
3618 hci_send_to_monitor(hdev, skb);
3619
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 if (atomic_read(&hdev->promisc)) {
3621 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003622 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 }
3624
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003625 if (test_bit(HCI_RAW, &hdev->flags) ||
3626 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627 kfree_skb(skb);
3628 continue;
3629 }
3630
3631 if (test_bit(HCI_INIT, &hdev->flags)) {
3632 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003633 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 case HCI_ACLDATA_PKT:
3635 case HCI_SCODATA_PKT:
3636 kfree_skb(skb);
3637 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003638 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 }
3640
3641 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003642 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003644 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 hci_event_packet(hdev, skb);
3646 break;
3647
3648 case HCI_ACLDATA_PKT:
3649 BT_DBG("%s ACL data packet", hdev->name);
3650 hci_acldata_packet(hdev, skb);
3651 break;
3652
3653 case HCI_SCODATA_PKT:
3654 BT_DBG("%s SCO data packet", hdev->name);
3655 hci_scodata_packet(hdev, skb);
3656 break;
3657
3658 default:
3659 kfree_skb(skb);
3660 break;
3661 }
3662 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663}
3664
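/* Command work: strictly flow-controlled by hdev->cmd_cnt. One
 * command is cloned into hdev->sent_cmd, handed to the driver and the
 * command timer armed; the next command goes out only after the
 * controller acknowledges the previous one.
 */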
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003665static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003667 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668 struct sk_buff *skb;
3669
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003670 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3671 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003674 if (atomic_read(&hdev->cmd_cnt)) {
3675 skb = skb_dequeue(&hdev->cmd_q);
3676 if (!skb)
3677 return;
3678
Wei Yongjun7585b972009-02-25 18:29:52 +08003679 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003681 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003682 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003684 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003685 if (test_bit(HCI_RESET, &hdev->flags))
3686 del_timer(&hdev->cmd_timer);
3687 else
3688 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003689 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 } else {
3691 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003692 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 }
3694 }
3695}