/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
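
/* Synchronous requests share a small completion protocol: the waiter sets
 * hdev->req_status to HCI_REQ_PEND and sleeps on hdev->req_wait_q, while
 * the event path (or a cancellation) stores a result, flips req_status to
 * HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the queue. The two helpers
 * above are the completion-side writers of that state in this file.
 */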

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
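
/* Illustrative caller of the exported synchronous command API. This is a
 * sketch, not code from this file: the vendor opcode 0xfc0f and the
 * one-byte parameter are hypothetical and only show the calling
 * convention and skb ownership (the caller must free the returned skb).
 *
 *      u8 param = 0x01;
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(param), &param,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      ... inspect skb->data (the command complete parameters) ...
 *      kfree_skb(skb);
 */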

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
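
/* The request builders below all follow the same shape: a void callback
 * that receives a struct hci_request and queues zero or more commands
 * with hci_req_add(). A minimal hypothetical builder, for illustration
 * only (hci_scan_req further down is the real equivalent):
 *
 *      static void example_req(struct hci_request *req, unsigned long opt)
 *      {
 *              __u8 mode = opt;
 *
 *              hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &mode);
 *      }
 *
 * It would then be run synchronously with
 * hci_req_sync(hdev, example_req, mode, HCI_CMD_TIMEOUT).
 */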

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
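
/* The value returned here selects the Inquiry Mode: 0x00 for standard
 * inquiry results, 0x01 for results with RSSI and 0x02 for extended
 * inquiry results. The hard-coded manufacturer/revision checks appear
 * to be quirks for controllers that handle RSSI inquiry results without
 * advertising the corresponding LMP feature bit.
 */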

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
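
/* The event mask is a little-endian 64-bit bitfield in which bit
 * (event code - 1) enables the corresponding HCI event; for example,
 * Disconnection Complete (code 0x05) is bit 4, i.e. events[0] |= 0x10,
 * and Inquiry Result with RSSI (code 0x22) is bit 33, i.e.
 * events[4] |= 0x02, matching the assignments above.
 */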

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
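
/* Controller bring-up thus runs up to four synchronous stages: stage 1
 * resets and identifies the controller and is all an AMP controller
 * gets, while stages 2-4 configure BR/EDR and LE specifics, event masks
 * and optional features for HCI_BREDR devices.
 */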

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
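
/* Every successful hci_dev_get() takes a reference on the device, so
 * callers must pair it with hci_dev_put() once they are done with the
 * hdev pointer, as hci_inquiry() below does on its done path.
 */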

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
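
/* The discovery cache keeps each entry on up to two lists: ->all links
 * every seen device, while ->unknown or ->resolve additionally track
 * entries whose remote name still needs fetching. The resolve list is
 * kept roughly ordered so that entries with stronger (less negative)
 * RSSI come first, via hci_inquiry_cache_update_resolve() above.
 */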

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
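
/* From user space this path is reached via the HCIINQUIRY ioctl on a raw
 * HCI socket. A hypothetical caller sketch (error handling elided), with
 * buf sized for the request header plus the returned inquiry_info array:
 *
 *      struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
 *
 *      ir->dev_id  = 0;
 *      ir->flags   = IREQ_CACHE_FLUSH;
 *      ir->lap[0]  = 0x33;     // GIAC: 0x9e8b33
 *      ir->lap[1]  = 0x8b;
 *      ir->lap[2]  = 0x9e;
 *      ir->length  = 8;        // nominally 8 * 1.28s of inquiry
 *      ir->num_rsp = 255;
 *
 *      ioctl(sock, HCIINQUIRY, (unsigned long) buf);
 */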

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                if (lmp_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_CTRL;
                if (lmp_host_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_HOST;
        } else {
                flags |= LE_AD_NO_BREDR;
        }

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
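
/* Advertising data is built as a sequence of length/type/value structures,
 * where the leading octet counts the type byte plus the value bytes. For
 * instance the flags entry above is { 0x02, EIR_FLAGS, flags } and a name
 * entry is { name_len + 1, EIR_NAME_COMPLETE, <name> }, all packed into
 * at most HCI_MAX_AD_LENGTH octets.
 */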

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
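
/* Opening a device therefore follows a fixed order: driver open(), an
 * optional vendor setup() during the HCI_SETUP stage, then __hci_init()
 * for the HCI-level initialization; any failure unwinds the work queues
 * and calls close() so the device ends up fully closed again.
 */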
1304
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001305/* ---- HCI ioctl helpers ---- */
1306
1307int hci_dev_open(__u16 dev)
1308{
1309 struct hci_dev *hdev;
1310 int err;
1311
1312 hdev = hci_dev_get(dev);
1313 if (!hdev)
1314 return -ENODEV;
1315
Johan Hedberge1d08f42013-10-01 22:44:50 +03001316 /* We need to ensure that no other power on/off work is pending
1317 * before proceeding to call hci_dev_do_open. This is
1318 * particularly important if the setup procedure has not yet
1319 * completed.
1320 */
1321 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1322 cancel_delayed_work(&hdev->power_off);
1323
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001324 /* After this call it is guaranteed that the setup procedure
1325 * has finished. This means that error conditions like RFKILL
1326 * or no valid public or static random address apply.
1327 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001328 flush_workqueue(hdev->req_workqueue);
1329
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001330 err = hci_dev_do_open(hdev);
1331
1332 hci_dev_put(hdev);
1333
1334 return err;
1335}
1336
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

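/* HCIDEVRESET: soft-reset a device that is up. Queued frames, the
 * inquiry cache and the connection hash are dropped and, unless
 * HCI_RAW is set, an HCI_Reset is sent to the controller. The device
 * itself stays up.
 */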
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

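/* Handle the legacy HCISET* ioctls. Note that for HCISETACLMTU and
 * HCISETSCOMTU, dev_opt carries two 16-bit values which the code
 * below extracts by byte position; on a little-endian host this
 * corresponds to packing the MTU into the upper half and the packet
 * count into the lower half, e.g. (illustrative sketch only):
 *
 *	dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 */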
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

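/* HCIGETDEVLIST: copy the id and flags of up to dev_num registered
 * devices to userspace. Requests larger than two pages worth of
 * entries are rejected with -EINVAL up front.
 */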
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

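/* rfkill callback. Closing the device on block is skipped while
 * HCI_SETUP is in progress; hci_power_on() re-checks HCI_RFKILLED
 * once setup has completed.
 */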
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

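/* Deferred power-on work, queued from hci_register_dev() and the mgmt
 * power-on path. Error conditions that were deliberately ignored
 * during setup (rfkill, missing public/static address) are
 * re-evaluated here and force the device back off.
 */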
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

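/* Decide whether a new link key should be stored persistently.
 * Legacy keys (types 0x00-0x02) always are, debug keys never are; for
 * the remaining types the decision depends on the bonding
 * requirements of both sides of the connection.
 */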
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

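/* Add or update an SMP long term key. Only keys of STK or LTK type
 * are accepted, and for new keys of LTK type userspace is notified
 * via mgmt_new_ltk().
 */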
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

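/* Block a remote address. BDADDR_ANY is rejected and duplicates
 * return -EEXIST; the mgmt layer is notified so userspace can track
 * the blacklist.
 */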
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

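/* Typical driver usage of the allocation and registration API, as an
 * illustrative sketch (the my_* callback names are hypothetical and
 * error handling is trimmed):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;		// driver-provided callbacks
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);	// schedules hci_power_on()
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On removal the driver calls hci_unregister_dev() followed by
 * hci_free_dev().
 */
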
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

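/* Reassemble a possibly fragmented HCI packet of the given type into
 * hdev->reassembly[index]. Once a complete frame has been delivered
 * via hci_recv_frame(), the number of unconsumed input bytes is
 * returned; a negative value indicates an error.
 */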
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	skb->dev = (void *) hdev;

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

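/* An asynchronous request is built with hci_req_init(), one or more
 * hci_req_add() calls and a final hci_req_run(), which queues the
 * whole batch on hdev->cmd_q. A minimal sketch, mirroring the pattern
 * used by le_scan_disable_work() above (the my_complete callback name
 * is hypothetical):
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_DISABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);	// my_complete(hdev, status)
 *	if (err)
 *		BT_ERR("request failed: err %d", err);
 */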
2735int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2736{
2737 struct hci_dev *hdev = req->hdev;
2738 struct sk_buff *skb;
2739 unsigned long flags;
2740
2741 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2742
Andre Guedes5d73e032013-03-08 11:20:16 -03002743	/* If an error occurred during request building, remove all HCI
2744 * commands queued on the HCI request queue.
2745 */
2746 if (req->err) {
2747 skb_queue_purge(&req->cmd_q);
2748 return req->err;
2749 }
2750
Johan Hedberg3119ae92013-03-05 20:37:44 +02002751 /* Do not allow empty requests */
2752 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002753 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002754
2755 skb = skb_peek_tail(&req->cmd_q);
2756 bt_cb(skb)->req.complete = complete;
2757
2758 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2759 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2760 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2761
2762 queue_work(hdev->workqueue, &hdev->cmd_work);
2763
2764 return 0;
2765}
2766
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002767static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002768 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769{
2770 int len = HCI_COMMAND_HDR_SIZE + plen;
2771 struct hci_command_hdr *hdr;
2772 struct sk_buff *skb;
2773
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002775 if (!skb)
2776 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777
2778 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002779 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 hdr->plen = plen;
2781
2782 if (plen)
2783 memcpy(skb_put(skb, plen), param, plen);
2784
2785 BT_DBG("skb len %d", skb->len);
2786
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002787 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002788
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002789 return skb;
2790}
2791
2792/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002793int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2794 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002795{
2796 struct sk_buff *skb;
2797
2798 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2799
2800 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2801 if (!skb) {
2802 BT_ERR("%s no memory for command", hdev->name);
2803 return -ENOMEM;
2804 }
2805
Johan Hedberg11714b32013-03-05 20:37:47 +02002806	/* Stand-alone HCI commands must be flagged as
2807 * single-command requests.
2808 */
2809 bt_cb(skb)->req.start = true;
2810
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002812 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813
2814 return 0;
2815}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
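/* Illustrative only: sending a stand-alone command. The parameter
 * buffer is copied into the skb by hci_prepare_cmd(), so a stack
 * variable is fine here; the wrapper name is hypothetical.
 */
static int example_write_scan_enable(struct hci_dev *hdev, __u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan),
			    &scan);
}
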
Johan Hedberg71c76a12013-03-05 20:37:46 +02002817/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002818void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2819 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002820{
2821 struct hci_dev *hdev = req->hdev;
2822 struct sk_buff *skb;
2823
2824 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2825
Andre Guedes34739c12013-03-08 11:20:18 -03002826	/* If an error occurred during request building, there is no point in
2827 * queueing the HCI command. We can simply return.
2828 */
2829 if (req->err)
2830 return;
2831
Johan Hedberg71c76a12013-03-05 20:37:46 +02002832 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2833 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002834 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2835 hdev->name, opcode);
2836 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002837 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002838 }
2839
2840 if (skb_queue_empty(&req->cmd_q))
2841 bt_cb(skb)->req.start = true;
2842
Johan Hedberg02350a72013-04-03 21:50:29 +03002843 bt_cb(skb)->req.event = event;
2844
Johan Hedberg71c76a12013-03-05 20:37:46 +02002845 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002846}
2847
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002848void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2849 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002850{
2851 hci_req_add_ev(req, opcode, plen, param, 0);
2852}
2853
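/* Illustrative only: building and running an asynchronous request with
 * the helpers above. Every command queued between hci_req_init() and
 * hci_req_run() is sent as one request and completed through a single
 * callback. The callback and the command choice are a hypothetical
 * sketch.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__le16 policy = cpu_to_le16(0x0005);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
		    &policy);

	return hci_req_run(&req, example_req_complete);
}
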
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002855void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856{
2857 struct hci_command_hdr *hdr;
2858
2859 if (!hdev->sent_cmd)
2860 return NULL;
2861
2862 hdr = (void *) hdev->sent_cmd->data;
2863
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002864 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 return NULL;
2866
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002867 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868
2869 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2870}
2871
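/* Illustrative only: a Command Complete handler can recover the
 * parameters of the command it answers, the pattern hci_event.c uses
 * throughout. The handler below is a hypothetical sketch for Write
 * Scan Enable.
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent || status)
		return;

	BT_DBG("%s scan 0x%2.2x", hdev->name, *sent);
}
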
2872/* Send ACL data */
2873static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2874{
2875 struct hci_acl_hdr *hdr;
2876 int len = skb->len;
2877
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002878 skb_push(skb, HCI_ACL_HDR_SIZE);
2879 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002880 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002881 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2882 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883}
2884
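/* Illustrative only: the ACL handle field packs a 12-bit connection
 * handle together with 4 bits of boundary/broadcast flags, e.g.:
 *
 *	__u16 field = hci_handle_pack(0x002a, ACL_START);
 *	hci_handle(field);	// 0x002a again
 *	hci_flags(field);	// ACL_START, taken from the top nibble
 */
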
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002885static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002886 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002888 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 struct hci_dev *hdev = conn->hdev;
2890 struct sk_buff *list;
2891
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002892 skb->len = skb_headlen(skb);
2893 skb->data_len = 0;
2894
2895 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002896
2897 switch (hdev->dev_type) {
2898 case HCI_BREDR:
2899 hci_add_acl_hdr(skb, conn->handle, flags);
2900 break;
2901 case HCI_AMP:
2902 hci_add_acl_hdr(skb, chan->handle, flags);
2903 break;
2904 default:
2905 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2906 return;
2907 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002908
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002909 list = skb_shinfo(skb)->frag_list;
2910 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911		/* Non-fragmented */
2912 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2913
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002914 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 } else {
2916 /* Fragmented */
2917 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2918
2919 skb_shinfo(skb)->frag_list = NULL;
2920
2921 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002922 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002924 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002925
2926 flags &= ~ACL_START;
2927 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 do {
2929 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002930
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002931 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002932 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933
2934 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2935
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002936 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937 } while (list);
2938
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002939 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002941}
2942
2943void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2944{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002945 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002946
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002947 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002948
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002949 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002951 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953
2954/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002955void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956{
2957 struct hci_dev *hdev = conn->hdev;
2958 struct hci_sco_hdr hdr;
2959
2960 BT_DBG("%s len %d", hdev->name, skb->len);
2961
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002962 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 hdr.dlen = skb->len;
2964
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002965 skb_push(skb, HCI_SCO_HDR_SIZE);
2966 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002967 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002969 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002970
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002972 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974
2975/* ---- HCI TX task (outgoing data) ---- */
2976
2977/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002978static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2979 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980{
2981 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002982 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002983 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002985	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002987
2988 rcu_read_lock();
2989
2990 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002991 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002993
2994 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2995 continue;
2996
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 num++;
2998
2999 if (c->sent < min) {
3000 min = c->sent;
3001 conn = c;
3002 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003003
3004 if (hci_conn_num(hdev, type) == num)
3005 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 }
3007
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003008 rcu_read_unlock();
3009
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003011 int cnt, q;
3012
3013 switch (conn->type) {
3014 case ACL_LINK:
3015 cnt = hdev->acl_cnt;
3016 break;
3017 case SCO_LINK:
3018 case ESCO_LINK:
3019 cnt = hdev->sco_cnt;
3020 break;
3021 case LE_LINK:
3022 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3023 break;
3024 default:
3025 cnt = 0;
3026 BT_ERR("Unknown link type");
3027 }
3028
3029 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030 *quote = q ? q : 1;
3031 } else
3032 *quote = 0;
3033
3034 BT_DBG("conn %p quote %d", conn, *quote);
3035 return conn;
3036}
3037
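/* Illustrative only: with, say, hdev->acl_cnt == 8 free ACL slots and
 * num == 3 busy ACL connections, the connection picked above (lowest
 * c->sent) gets a quote of 8 / 3 == 2 packets for this round; a zero
 * quotient is bumped to 1 so the scheduler always makes progress.
 */
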
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003038static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039{
3040 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003041 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042
Ville Tervobae1f5d92011-02-10 22:38:53 -03003043 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003045 rcu_read_lock();
3046
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003048 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003049 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003050 BT_ERR("%s killing stalled connection %pMR",
3051 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003052 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 }
3054 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003055
3056 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057}
3058
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003059static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3060 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003061{
3062 struct hci_conn_hash *h = &hdev->conn_hash;
3063 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003064 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003065 struct hci_conn *conn;
3066 int cnt, q, conn_num = 0;
3067
3068 BT_DBG("%s", hdev->name);
3069
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003070 rcu_read_lock();
3071
3072 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003073 struct hci_chan *tmp;
3074
3075 if (conn->type != type)
3076 continue;
3077
3078 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3079 continue;
3080
3081 conn_num++;
3082
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003083 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003084 struct sk_buff *skb;
3085
3086 if (skb_queue_empty(&tmp->data_q))
3087 continue;
3088
3089 skb = skb_peek(&tmp->data_q);
3090 if (skb->priority < cur_prio)
3091 continue;
3092
3093 if (skb->priority > cur_prio) {
3094 num = 0;
3095 min = ~0;
3096 cur_prio = skb->priority;
3097 }
3098
3099 num++;
3100
3101 if (conn->sent < min) {
3102 min = conn->sent;
3103 chan = tmp;
3104 }
3105 }
3106
3107 if (hci_conn_num(hdev, type) == conn_num)
3108 break;
3109 }
3110
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003111 rcu_read_unlock();
3112
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003113 if (!chan)
3114 return NULL;
3115
3116 switch (chan->conn->type) {
3117 case ACL_LINK:
3118 cnt = hdev->acl_cnt;
3119 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003120 case AMP_LINK:
3121 cnt = hdev->block_cnt;
3122 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003123 case SCO_LINK:
3124 case ESCO_LINK:
3125 cnt = hdev->sco_cnt;
3126 break;
3127 case LE_LINK:
3128 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3129 break;
3130 default:
3131 cnt = 0;
3132 BT_ERR("Unknown link type");
3133 }
3134
3135 q = cnt / num;
3136 *quote = q ? q : 1;
3137 BT_DBG("chan %p quote %d", chan, *quote);
3138 return chan;
3139}
3140
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003141static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3142{
3143 struct hci_conn_hash *h = &hdev->conn_hash;
3144 struct hci_conn *conn;
3145 int num = 0;
3146
3147 BT_DBG("%s", hdev->name);
3148
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003149 rcu_read_lock();
3150
3151 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003152 struct hci_chan *chan;
3153
3154 if (conn->type != type)
3155 continue;
3156
3157 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3158 continue;
3159
3160 num++;
3161
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003162 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003163 struct sk_buff *skb;
3164
3165 if (chan->sent) {
3166 chan->sent = 0;
3167 continue;
3168 }
3169
3170 if (skb_queue_empty(&chan->data_q))
3171 continue;
3172
3173 skb = skb_peek(&chan->data_q);
3174 if (skb->priority >= HCI_PRIO_MAX - 1)
3175 continue;
3176
3177 skb->priority = HCI_PRIO_MAX - 1;
3178
3179 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003180 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003181 }
3182
3183 if (hci_conn_num(hdev, type) == num)
3184 break;
3185 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003186
3187 rcu_read_unlock();
3188
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003189}
3190
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003191static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3192{
3193 /* Calculate count of blocks used by this packet */
3194 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3195}
3196
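/* Illustrative only: with hdev->block_len == 339 (a typical AMP data
 * block size), an skb carrying 4 bytes of ACL header plus 1000 bytes
 * of payload occupies DIV_ROUND_UP(1000, 339) == 3 controller blocks.
 */
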
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003197static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 if (!test_bit(HCI_RAW, &hdev->flags)) {
3200 /* ACL tx timeout must be longer than maximum
3201 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003202 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003203 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003204 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003206}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003208static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003209{
3210 unsigned int cnt = hdev->acl_cnt;
3211 struct hci_chan *chan;
3212 struct sk_buff *skb;
3213 int quote;
3214
3215 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003216
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003217 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003218 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003219 u32 priority = (skb_peek(&chan->data_q))->priority;
3220 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003221 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003222 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003223
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003224 /* Stop if priority has changed */
3225 if (skb->priority < priority)
3226 break;
3227
3228 skb = skb_dequeue(&chan->data_q);
3229
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003230 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003231 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003232
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003233 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 hdev->acl_last_tx = jiffies;
3235
3236 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003237 chan->sent++;
3238 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 }
3240 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003241
3242 if (cnt != hdev->acl_cnt)
3243 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244}
3245
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003246static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003247{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003248 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003249 struct hci_chan *chan;
3250 struct sk_buff *skb;
3251 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003252 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003253
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003254 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003255
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003256 BT_DBG("%s", hdev->name);
3257
3258 if (hdev->dev_type == HCI_AMP)
3259 type = AMP_LINK;
3260 else
3261 type = ACL_LINK;
3262
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003263 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003264 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003265 u32 priority = (skb_peek(&chan->data_q))->priority;
3266 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3267 int blocks;
3268
3269 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003270 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003271
3272 /* Stop if priority has changed */
3273 if (skb->priority < priority)
3274 break;
3275
3276 skb = skb_dequeue(&chan->data_q);
3277
3278 blocks = __get_blocks(hdev, skb);
3279 if (blocks > hdev->block_cnt)
3280 return;
3281
3282 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003283 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003284
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003285 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003286 hdev->acl_last_tx = jiffies;
3287
3288 hdev->block_cnt -= blocks;
3289 quote -= blocks;
3290
3291 chan->sent += blocks;
3292 chan->conn->sent += blocks;
3293 }
3294 }
3295
3296 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003297 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003298}
3299
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003300static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003301{
3302 BT_DBG("%s", hdev->name);
3303
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003304 /* No ACL link over BR/EDR controller */
3305 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3306 return;
3307
3308 /* No AMP link over AMP controller */
3309 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003310 return;
3311
3312 switch (hdev->flow_ctl_mode) {
3313 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3314 hci_sched_acl_pkt(hdev);
3315 break;
3316
3317 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3318 hci_sched_acl_blk(hdev);
3319 break;
3320 }
3321}
3322
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003324static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325{
3326 struct hci_conn *conn;
3327 struct sk_buff *skb;
3328 int quote;
3329
3330 BT_DBG("%s", hdev->name);
3331
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003332 if (!hci_conn_num(hdev, SCO_LINK))
3333 return;
3334
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3336 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3337 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003338 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
3340 conn->sent++;
3341 if (conn->sent == ~0)
3342 conn->sent = 0;
3343 }
3344 }
3345}
3346
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003347static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003348{
3349 struct hci_conn *conn;
3350 struct sk_buff *skb;
3351 int quote;
3352
3353 BT_DBG("%s", hdev->name);
3354
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003355 if (!hci_conn_num(hdev, ESCO_LINK))
3356 return;
3357
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003358 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3359 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003360 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3361 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003362 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003363
3364 conn->sent++;
3365 if (conn->sent == ~0)
3366 conn->sent = 0;
3367 }
3368 }
3369}
3370
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003371static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003372{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003373 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003374 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003375 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003376
3377 BT_DBG("%s", hdev->name);
3378
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003379 if (!hci_conn_num(hdev, LE_LINK))
3380 return;
3381
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003382 if (!test_bit(HCI_RAW, &hdev->flags)) {
3383 /* LE tx timeout must be longer than maximum
3384 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003385 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003386 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003387 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003388 }
3389
3390 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003391 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003392 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003393 u32 priority = (skb_peek(&chan->data_q))->priority;
3394 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003395 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003396 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003397
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003398 /* Stop if priority has changed */
3399 if (skb->priority < priority)
3400 break;
3401
3402 skb = skb_dequeue(&chan->data_q);
3403
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003404 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003405 hdev->le_last_tx = jiffies;
3406
3407 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003408 chan->sent++;
3409 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003410 }
3411 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003412
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003413 if (hdev->le_pkts)
3414 hdev->le_cnt = cnt;
3415 else
3416 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003417
3418 if (cnt != tmp)
3419 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003420}
3421
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003422static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003424 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425 struct sk_buff *skb;
3426
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003427 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003428 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
Marcel Holtmann52de5992013-09-03 18:08:38 -07003430 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3431 /* Schedule queues and send stuff to HCI driver */
3432 hci_sched_acl(hdev);
3433 hci_sched_sco(hdev);
3434 hci_sched_esco(hdev);
3435 hci_sched_le(hdev);
3436 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003437
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438 /* Send next queued raw (unknown type) packet */
3439 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003440 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441}
3442
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003443/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444
3445/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003446static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447{
3448 struct hci_acl_hdr *hdr = (void *) skb->data;
3449 struct hci_conn *conn;
3450 __u16 handle, flags;
3451
3452 skb_pull(skb, HCI_ACL_HDR_SIZE);
3453
3454 handle = __le16_to_cpu(hdr->handle);
3455 flags = hci_flags(handle);
3456 handle = hci_handle(handle);
3457
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003458 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003459 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460
3461 hdev->stat.acl_rx++;
3462
3463 hci_dev_lock(hdev);
3464 conn = hci_conn_hash_lookup_handle(hdev, handle);
3465 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003466
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003468 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003469
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003471 l2cap_recv_acldata(conn, skb, flags);
3472 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003474 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003475 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 }
3477
3478 kfree_skb(skb);
3479}
3480
3481/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003482static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483{
3484 struct hci_sco_hdr *hdr = (void *) skb->data;
3485 struct hci_conn *conn;
3486 __u16 handle;
3487
3488 skb_pull(skb, HCI_SCO_HDR_SIZE);
3489
3490 handle = __le16_to_cpu(hdr->handle);
3491
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003492 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493
3494 hdev->stat.sco_rx++;
3495
3496 hci_dev_lock(hdev);
3497 conn = hci_conn_hash_lookup_handle(hdev, handle);
3498 hci_dev_unlock(hdev);
3499
3500 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003502 sco_recv_scodata(conn, skb);
3503 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003505 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003506 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 }
3508
3509 kfree_skb(skb);
3510}
3511
Johan Hedberg9238f362013-03-05 20:37:48 +02003512static bool hci_req_is_complete(struct hci_dev *hdev)
3513{
3514 struct sk_buff *skb;
3515
3516 skb = skb_peek(&hdev->cmd_q);
3517 if (!skb)
3518 return true;
3519
3520 return bt_cb(skb)->req.start;
3521}
3522
Johan Hedberg42c6b122013-03-05 20:37:49 +02003523static void hci_resend_last(struct hci_dev *hdev)
3524{
3525 struct hci_command_hdr *sent;
3526 struct sk_buff *skb;
3527 u16 opcode;
3528
3529 if (!hdev->sent_cmd)
3530 return;
3531
3532 sent = (void *) hdev->sent_cmd->data;
3533 opcode = __le16_to_cpu(sent->opcode);
3534 if (opcode == HCI_OP_RESET)
3535 return;
3536
3537 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3538 if (!skb)
3539 return;
3540
3541 skb_queue_head(&hdev->cmd_q, skb);
3542 queue_work(hdev->workqueue, &hdev->cmd_work);
3543}
3544
Johan Hedberg9238f362013-03-05 20:37:48 +02003545void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3546{
3547 hci_req_complete_t req_complete = NULL;
3548 struct sk_buff *skb;
3549 unsigned long flags;
3550
3551 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3552
Johan Hedberg42c6b122013-03-05 20:37:49 +02003553 /* If the completed command doesn't match the last one that was
3554	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003555 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003556 if (!hci_sent_cmd_data(hdev, opcode)) {
3557		/* Some CSR-based controllers generate a spontaneous
3558 * reset complete event during init and any pending
3559 * command will never be completed. In such a case we
3560 * need to resend whatever was the last sent
3561 * command.
3562 */
3563 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3564 hci_resend_last(hdev);
3565
Johan Hedberg9238f362013-03-05 20:37:48 +02003566 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003567 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003568
3569	/* If the command succeeded and there are still more commands in
3570	 * this request, the request is not yet complete.
3571 */
3572 if (!status && !hci_req_is_complete(hdev))
3573 return;
3574
3575	/* If this was the last command in a request, the complete
3576 * callback would be found in hdev->sent_cmd instead of the
3577 * command queue (hdev->cmd_q).
3578 */
3579 if (hdev->sent_cmd) {
3580 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003581
3582 if (req_complete) {
3583 /* We must set the complete callback to NULL to
3584 * avoid calling the callback more than once if
3585 * this function gets called again.
3586 */
3587 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3588
Johan Hedberg9238f362013-03-05 20:37:48 +02003589 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003590 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003591 }
3592
3593 /* Remove all pending commands belonging to this request */
3594 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3595 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3596 if (bt_cb(skb)->req.start) {
3597 __skb_queue_head(&hdev->cmd_q, skb);
3598 break;
3599 }
3600
3601 req_complete = bt_cb(skb)->req.complete;
3602 kfree_skb(skb);
3603 }
3604 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3605
3606call_complete:
3607 if (req_complete)
3608 req_complete(hdev, status);
3609}
3610
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003611static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003613 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 struct sk_buff *skb;
3615
3616 BT_DBG("%s", hdev->name);
3617
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003619 /* Send copy to monitor */
3620 hci_send_to_monitor(hdev, skb);
3621
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622 if (atomic_read(&hdev->promisc)) {
3623 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003624 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 }
3626
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003627 if (test_bit(HCI_RAW, &hdev->flags) ||
3628 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629 kfree_skb(skb);
3630 continue;
3631 }
3632
3633 if (test_bit(HCI_INIT, &hdev->flags)) {
3634			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003635 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 case HCI_ACLDATA_PKT:
3637 case HCI_SCODATA_PKT:
3638 kfree_skb(skb);
3639 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003640 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 }
3642
3643 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003644 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003646 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647 hci_event_packet(hdev, skb);
3648 break;
3649
3650 case HCI_ACLDATA_PKT:
3651 BT_DBG("%s ACL data packet", hdev->name);
3652 hci_acldata_packet(hdev, skb);
3653 break;
3654
3655 case HCI_SCODATA_PKT:
3656 BT_DBG("%s SCO data packet", hdev->name);
3657 hci_scodata_packet(hdev, skb);
3658 break;
3659
3660 default:
3661 kfree_skb(skb);
3662 break;
3663 }
3664 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665}
3666
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003667static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003669 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 struct sk_buff *skb;
3671
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003672 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3673 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003676 if (atomic_read(&hdev->cmd_cnt)) {
3677 skb = skb_dequeue(&hdev->cmd_q);
3678 if (!skb)
3679 return;
3680
Wei Yongjun7585b972009-02-25 18:29:52 +08003681 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003683 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003684 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003686 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003687 if (test_bit(HCI_RESET, &hdev->flags))
3688 del_timer(&hdev->cmd_timer);
3689 else
3690 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003691 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692 } else {
3693 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003694 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695 }
3696 }
3697}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003698
Andre Guedes31f79562012-04-24 21:02:53 -03003699u8 bdaddr_to_le(u8 bdaddr_type)
3700{
3701 switch (bdaddr_type) {
3702 case BDADDR_LE_PUBLIC:
3703 return ADDR_LE_DEV_PUBLIC;
3704
3705 default:
3706 /* Fallback to LE Random address type */
3707 return ADDR_LE_DEV_RANDOM;
3708 }
3709}