/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
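/* Detach the most recently received event from hdev and return it if it
 * matches the expected event code or, for a Command Complete, the expected
 * opcode. On any mismatch the skb is freed and an ERR_PTR is returned.
 */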
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
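/* Send a single HCI command and sleep until the matching Command Complete
 * (or, if @event is non-zero, the given event) arrives or @timeout expires.
 * Returns the resulting event skb, which the caller must free, or an
 * ERR_PTR on failure.
 */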
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
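/* Pick the best Write Inquiry Mode parameter the controller can handle:
 * 0x02 for extended inquiry results, 0x01 for inquiry results with RSSI,
 * 0x00 for the standard format. A few controllers are special-cased by
 * manufacturer/revision because they handle RSSI results despite not
 * advertising the feature bit.
 */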
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

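/* A discovery session counts as active while the controller is inquiring
 * or while remote names are still being resolved.
 */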
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

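/* Re-insert @ie into the resolve list: entries still waiting on a name
 * request keep their position, the remaining ones stay ordered by signal
 * strength (smallest abs(rssi), i.e. strongest, first).
 */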
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

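/* Bit-wait callback: reschedule and report whether the sleeping task was
 * interrupted by a signal while waiting for HCI_INQUIRY to clear.
 */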
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

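/* Handle the HCIINQUIRY ioctl: run a fresh inquiry if the cache is stale
 * (or a flush was requested), then copy the cached results to user space.
 */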
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

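/* Bring up a device: open the transport, run the optional vendor setup
 * and the staged HCI init sequence, then mark the device as HCI_UP and
 * notify the stack. On init failure everything is torn down again.
 */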
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

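/* Tear a device down: cancel pending work and timers, flush the inquiry
 * and connection state, optionally reset the controller, and close the
 * transport. The reverse of hci_dev_do_open().
 */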
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty and no tasks are
	 * scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

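	/* The MTU ioctls pack two 16-bit values into the 32-bit
	 * dev_opt: as the code below reads it, the __u16 half at
	 * index 1 carries the MTU and the half at index 0 the packet
	 * count.
	 */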
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	hci_req_run(&req, NULL);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

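/* Decide whether a newly created link key should be stored
 * persistently. In short: legacy keys always persist, debug keys
 * never do, and for the remaining types the decision depends on the
 * bonding requirements negotiated by both sides of the connection.
 */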
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
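
/* Note the chain behind interleaved discovery: le_scan_disable_work
 * sends LE_SCAN_DISABLE, its completion handler
 * (le_scan_disable_work_complete) then queues a BR/EDR inquiry, and
 * inquiry_complete only has to report failures; successful inquiry
 * results flow through the normal HCI event processing.
 */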

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
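
/* A minimal sketch of typical driver-side usage (my_open, my_close
 * and my_send are hypothetical driver callbacks; a real driver also
 * fills in hdev->bus and similar fields):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */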

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

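/* Incrementally reassemble one HCI frame from a raw byte stream. The
 * per-type header is parsed first to learn the expected payload
 * length; bytes then accumulate in hdev->reassembly[index] until a
 * complete frame can be handed to hci_recv_frame(). Returns the
 * number of input bytes left unconsumed, or a negative error.
 */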
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else {
			type = bt_cb(skb)->pkt_type;
		}

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
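
/* Typical usage of the request API, mirroring hci_discov_off() above
 * (a sketch; the opcode and parameter are just an example taken from
 * this file):
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	err = hci_req_run(&req, NULL);
 *
 * The completion callback, when given, is attached to the last
 * queued command and runs once the whole request has completed.
 */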
2699
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

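/* Example: a minimal sketch of issuing a stand-alone command through
 * hci_send_cmd(). The wrapper function is hypothetical; the opcode and
 * the one-byte scan parameter follow the definitions in hci.h.
 *
 *	static int example_enable_page_scan(struct hci_dev *hdev)
 *	{
 *		__u8 scan = SCAN_PAGE;
 *
 *		return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
 *				    sizeof(scan), &scan);
 *	}
 */
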
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

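/* Example: a minimal sketch of building and running an asynchronous
 * request with hci_req_init(), hci_req_add() and hci_req_run(). The
 * completion callback and the wrapper function are hypothetical
 * illustrations; the opcode is taken from hci.h.
 *
 *	static void example_req_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	static int example_read_version(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return hci_req_run(&req, example_req_complete);
 *	}
 */
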
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

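/* Example: how a command-complete handler might recover the parameters
 * of the command it is completing (a sketch modelled on the handlers in
 * hci_event.c; variable names are illustrative).
 *
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (sent) {
 *		__u8 param = *((__u8 *) sent);
 *		// handle the parameter of the completed command
 *	}
 */
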
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

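/* Example: how a caller holding an hci_chan (in practice the L2CAP core)
 * hands a fully built ACL frame to the scheduler. ACL_START marks the
 * first fragment of a new PDU; this call is a sketch, not a complete
 * transmit path.
 *
 *	hci_send_acl(chan, skb, ACL_START);
 */
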
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

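/* Example: the SCO layer queues voice frames the same way; the skb must
 * already hold exactly one SCO payload (sketch only).
 *
 *	hci_send_sco(conn, skb);
 */
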
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

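/* Worked example of the quota calculation above: with hdev->acl_cnt = 8
 * free controller buffers and num = 3 busy ACL connections, the least
 * recently served connection gets *quote = 8 / 3 = 2 packets this round;
 * the result is clamped to at least 1 so a connection is never starved.
 * The numbers are illustrative.
 */
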
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

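/* Worked example of the channel scheduler above: if one connection has
 * channels with queued skbs at priorities 5 and 7, only the priority-7
 * channels compete in this round (num and min are reset whenever a
 * higher priority is seen), and among those the channel whose connection
 * has the smallest 'sent' count wins the quota. The priorities are
 * illustrative.
 */
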
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

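/* Worked example for __get_blocks(): with a controller block size of
 * hdev->block_len = 339 and an skb of 676 bytes (672 bytes of payload
 * behind the 4-byte ACL header), DIV_ROUND_UP(672, 339) = 2 blocks are
 * consumed. The numbers are illustrative; block_len is reported by the
 * controller.
 */
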
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

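/* Example of the request-boundary convention that hci_req_is_complete()
 * relies on: for a request queued as A -> B -> C, only A carries
 * bt_cb(skb)->req.start == true. Once A has been sent, the head of
 * hdev->cmd_q is B (start == false), so the request is known to be
 * still in flight; when the head carries start == true again, it is
 * the first command of the next request and the current one is done.
 */
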
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}