/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
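
/* A minimal usage sketch (illustrative, not part of the original file):
 * __hci_cmd_sync() sends one HCI command and blocks until the matching
 * Command Complete event arrives, returning the event skb. The opcode
 * choice and the error handling below are assumptions for the example.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(skb->data now holds the command complete parameters)
 *	kfree_skb(skb);
 */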

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
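
/* Usage sketch (an assumption, mirroring how the ioctl helpers in this
 * file drive request builders): run hci_scan_req (defined later in this
 * file) synchronously to enable inquiry and page scan.
 *
 *	int err;
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *			   HCI_INIT_TIMEOUT);
 *	if (err < 0)
 *		BT_ERR("%s scan enable failed: %d", hdev->name, err);
 */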

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
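
/* Usage sketch (illustrative only): hci_dev_get() returns the device with
 * a held reference, so every successful lookup must be balanced with a
 * hci_dev_put() once the caller is done with the device.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */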

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
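
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A rough sketch of the calling side; the buffer sizing and
 * parameter values are assumptions for illustration:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;	(inquiry window in 1.28s units)
 *	buf.ir.num_rsp = 255;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	buf.ir.lap[0]  = 0x33;	(General Inquiry Access Code,
 *	buf.ir.lap[1]  = 0x8b;	 0x9e8b33, little-endian)
 *	buf.ir.lap[2]  = 0x9e;
 *
 *	ioctl(hci_sock, HCIINQUIRY, &buf);
 */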

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
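
/* For illustration (not part of the original file): on an LE-only
 * controller with advertising enabled, create_ad() starts the buffer
 * with a three byte Flags element before appending the TX power level
 * and the local name.
 *
 *	u8 ad[HCI_MAX_AD_LENGTH];
 *	u8 len = create_ad(hdev, ad);
 *
 *	ad[0] == 2                              (element length)
 *	ad[1] == EIR_FLAGS                      (0x01)
 *	ad[2] == LE_AD_GENERAL | LE_AD_NO_BREDR
 */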
1184
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001185void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001186{
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001187 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001188 struct hci_cp_le_set_adv_data cp;
1189 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001190
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001191 if (!lmp_le_capable(hdev))
1192 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001193
1194 memset(&cp, 0, sizeof(cp));
1195
1196 len = create_ad(hdev, cp.data);
1197
1198 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001199 memcmp(cp.data, hdev->adv_data, len) == 0)
1200 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001201
1202 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1203 hdev->adv_data_len = len;
1204
1205 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001206
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001207 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001208}
1209
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001210static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 int ret = 0;
1213
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 BT_DBG("%s %p", hdev->name, hdev);
1215
1216 hci_req_lock(hdev);
1217
Johan Hovold94324962012-03-15 14:48:41 +01001218 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1219 ret = -ENODEV;
1220 goto done;
1221 }
1222
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001223 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1224 /* Check for rfkill but allow the HCI setup stage to
1225 * proceed (which in itself doesn't cause any RF activity).
1226 */
1227 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1228 ret = -ERFKILL;
1229 goto done;
1230 }
1231
1232 /* Check for valid public address or a configured static
1233 * random adddress, but let the HCI setup proceed to
1234 * be able to determine if there is a public address
1235 * or not.
1236 *
1237 * This check is only valid for BR/EDR controllers
1238 * since AMP controllers do not have an address.
1239 */
1240 if (hdev->dev_type == HCI_BREDR &&
1241 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1242 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1243 ret = -EADDRNOTAVAIL;
1244 goto done;
1245 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001246 }
1247
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 if (test_bit(HCI_UP, &hdev->flags)) {
1249 ret = -EALREADY;
1250 goto done;
1251 }
1252
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 if (hdev->open(hdev)) {
1254 ret = -EIO;
1255 goto done;
1256 }
1257
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001258 atomic_set(&hdev->cmd_cnt, 1);
1259 set_bit(HCI_INIT, &hdev->flags);
1260
1261 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1262 ret = hdev->setup(hdev);
1263
1264 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001265 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1266 set_bit(HCI_RAW, &hdev->flags);
1267
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001268 if (!test_bit(HCI_RAW, &hdev->flags) &&
1269 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001270 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 }
1272
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001273 clear_bit(HCI_INIT, &hdev->flags);
1274
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 if (!ret) {
1276 hci_dev_hold(hdev);
1277 set_bit(HCI_UP, &hdev->flags);
1278 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001279 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001280 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001281 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001282 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001283 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001284 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001285 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001286 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001288 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001289 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001290 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
1292 skb_queue_purge(&hdev->cmd_q);
1293 skb_queue_purge(&hdev->rx_q);
1294
1295 if (hdev->flush)
1296 hdev->flush(hdev);
1297
1298 if (hdev->sent_cmd) {
1299 kfree_skb(hdev->sent_cmd);
1300 hdev->sent_cmd = NULL;
1301 }
1302
1303 hdev->close(hdev);
1304 hdev->flags = 0;
1305 }
1306
1307done:
1308 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 return ret;
1310}
1311
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001312/* ---- HCI ioctl helpers ---- */
1313
1314int hci_dev_open(__u16 dev)
1315{
1316 struct hci_dev *hdev;
1317 int err;
1318
1319 hdev = hci_dev_get(dev);
1320 if (!hdev)
1321 return -ENODEV;
1322
Johan Hedberge1d08f42013-10-01 22:44:50 +03001323 /* We need to ensure that no other power on/off work is pending
1324 * before proceeding to call hci_dev_do_open. This is
1325 * particularly important if the setup procedure has not yet
1326 * completed.
1327 */
1328 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1329 cancel_delayed_work(&hdev->power_off);
1330
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001331 /* After this call it is guaranteed that the setup procedure
1332 * has finished. This means that error conditions like RFKILL
1333 * or no valid public or static random address apply.
1334 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001335 flush_workqueue(hdev->req_workqueue);
1336
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001337 err = hci_dev_do_open(hdev);
1338
1339 hci_dev_put(hdev);
1340
1341 return err;
1342}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
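
/* Usage sketch (illustrative, not part of the original file): for
 * HCISETACLMTU and HCISETSCOMTU the caller packs both the MTU and the
 * packet count into the single 32-bit dev_opt word; on a little-endian
 * host this matches the *((__u16 *) &dr.dev_opt + n) unpacking above:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = (1021 << 16) | 8;	// ACL MTU 1021, 8 packets
 *	ioctl(ctl, HCISETACLMTU, &dr);	// ctl as in the earlier sketch
 */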

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
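
/* Usage sketch (illustrative, not part of the original file):
 * hci_get_dev_list() backs the HCIGETDEVLIST ioctl; the caller sizes the
 * buffer for dev_num entries and reads the filled count back from the
 * same field:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
 *		printf("%u devices\n", dl->dev_num);
 */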

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

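/* Decide whether a link key may be discarded when the connection drops
 * or must be stored persistently. The result is reported to user space
 * through mgmt_new_link_key() and, inverted, becomes conn->flush_key in
 * hci_add_link_key() below.
 */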
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
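
/* Note (annotation, arming side not shown in this excerpt): cmd_timer is
 * armed by the command work path when a command is handed to the driver
 * and stopped in hci_dev_do_close(); restoring the command credit here
 * lets the command queue make progress after a controller that never
 * answered.
 */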

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
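
/* Usage sketch (illustrative; the my_* names are placeholders, only the
 * hci_* calls are real API): a transport driver typically pairs these
 * entry points as follows:
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;		// int (*)(struct hci_dev *)
 *	hdev->close = my_close;
 *	hdev->send  = my_send;		// int (*)(struct hci_dev *,
 *					//	   struct sk_buff *)
 *	err = hci_register_dev(hdev);	// on failure: hci_free_dev(hdev)
 *
 * and on removal:
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */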

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
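
/* Usage sketch (illustrative): a driver delivering a complete frame
 * labels it before handing it over, just as hci_reassembly() below does:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */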
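/* Collect bytes of a partial HCI frame of the given type into
 * hdev->reassembly[index]. Once the expected header and payload are
 * complete, the frame is pushed up via hci_recv_frame(). Returns the
 * number of unconsumed input bytes (zero if everything was absorbed
 * into a still-partial frame) or a negative error.
 */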
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
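
/* Usage sketch (illustrative): drivers that receive a raw byte stream in
 * which every frame starts with its packet-type byte (H:4 style UART
 * transports, for instance) can feed buffers in as they arrive; buf and
 * len are placeholders:
 *
 *	hci_recv_stream_fragment(hdev, buf, len);
 */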

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2731
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

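/* Example (illustrative sketch): the intended request flow is
 * hci_req_init() -> hci_req_add() -> hci_req_run(). The example_*
 * names are hypothetical; HCI_OP_RESET and the hci_req_* calls match
 * the code in this file (hci_req_add() is defined further below).
 */
static void example_reset_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s reset complete, status 0x%2.2x", hdev->name, status);
}

static int example_run_reset_req(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	/* Returns -ENODATA for an empty request and req->err if
	 * building any of the queued commands failed.
	 */
	return hci_req_run(&req, example_reset_complete);
}
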
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

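/* Example (illustrative sketch): a stand-alone command with a one-byte
 * parameter block. HCI_OP_WRITE_SCAN_ENABLE is a real opcode that takes
 * a single byte; the example_* wrapper itself is hypothetical.
 */
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
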
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

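/* Example (illustrative sketch): event handlers use hci_sent_cmd_data()
 * to recover the parameters of the command that a Command Complete
 * event refers to. The wrapper below is hypothetical; it mirrors how
 * hci_event.c reads back the scan enable byte.
 */
static u8 example_sent_scan_enable(struct hci_dev *hdev)
{
	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return 0;

	return *((u8 *) sent);
}
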
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

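/* Worked example (illustrative, values taken from hci.h): with handle
 * 0x002a and flags ACL_START (0x02), hci_handle_pack() yields 0x202a;
 * the low 12 bits carry the connection handle and the top 4 bits the
 * packet boundary and broadcast flags, as the ACL data header requires.
 */
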
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

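/* Worked example (illustrative): with hdev->acl_cnt == 8 and three ACL
 * connections holding queued data, the least-busy connection gets a
 * quote of 8 / 3 = 2 packets on this round; the quote never drops below
 * one, so a ready connection is never starved outright.
 */
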
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

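/* Worked example (illustrative): if one channel's head skb has priority
 * 7 while the others queue priority-5 traffic, the loop above resets
 * num/min the first time it sees priority 7, so only the channels at
 * the highest pending priority compete for this round's quote.
 */
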
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

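/* Worked example (illustrative, assuming HCI_ACL_HDR_SIZE == 4 as in
 * hci.h): with hdev->block_len == 64, a 200-byte skb carries 196 bytes
 * of payload after the ACL header and so occupies
 * DIV_ROUND_UP(196, 64) == 4 controller buffer blocks.
 */
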
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

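/* Note (assumption, based on the HCI_ACL_TX_TIMEOUT definition in
 * hci.h): the timeout is 45 seconds, i.e. just above the 40.9 second
 * maximum link supervision timeout mentioned above, so a stalled link
 * is only declared dead after the controller itself must have given
 * up on it.
 */
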
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}