/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

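/* Completion callback for synchronous requests. Records the result and
 * wakes up whoever is sleeping on req_wait_q in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */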
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

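/* Consume the last received event (hdev->recv_evt) and return it if it
 * matches: either the specific event requested, or, when "event" is 0,
 * a Command Complete event for the given opcode. On any mismatch the
 * skb is freed and ERR_PTR(-ENODATA) is returned.
 */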
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

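/* Send a single HCI command and wait (interruptibly, up to "timeout"
 * jiffies) for the controller to answer with the event "event", or with
 * a Command Complete for "opcode" when "event" is 0. Returns the event
 * skb on success, an ERR_PTR on failure. A minimal usage sketch:
 *
 *        skb = __hci_cmd_sync_ev(hdev, HCI_OP_RESET, 0, NULL, 0,
 *                                HCI_INIT_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        kfree_skb(skb);
 */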
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

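/* Like __hci_req_sync(), but fails with -ENETDOWN if the device is not
 * up and holds the request lock so that synchronous requests are fully
 * serialized.
 */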
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

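/* Pick the best inquiry mode the controller supports: 0x02 (inquiry
 * with extended inquiry result), 0x01 (inquiry with RSSI) or 0x00
 * (standard). The manufacturer/revision checks below whitelist a few
 * controllers that handle RSSI mode without advertising the feature.
 */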
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

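/* Build the page 1 event mask from the controller's LMP feature bits
 * and send HCI_OP_SET_EVENT_MASK, plus the LE event mask for LE capable
 * controllers.
 */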
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

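/* Third stage init: commands gated on the feature and command bitmasks
 * read during the earlier stages.
 */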
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

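/* Run the staged init sequence. Stage one applies to all controllers;
 * stages two to four only make sense for BR/EDR and dual-mode
 * controllers, so AMP controllers stop after stage one.
 */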
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

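/* Add a new inquiry cache entry for the device in "data" or refresh an
 * existing one. Returns false if the entry could not be allocated or if
 * the remote name is still unknown, true otherwise.
 */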
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

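/* Handler for the HCIINQUIRY ioctl: start a fresh inquiry if the cache
 * is stale (or a flush was requested), wait for it to finish and copy
 * the cached results back to user space.
 */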
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we will use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

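/* Assemble the LE advertising data (flags, TX power and local name)
 * into "ptr", which is expected to hold HCI_MAX_AD_LENGTH bytes.
 * Returns the number of bytes written.
 */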
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                if (lmp_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_CTRL;
                if (lmp_host_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_HOST;
        } else {
                flags |= LE_AD_NO_BREDR;
        }

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

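/* Power on a controller: open the transport, run the driver's setup
 * callback and the HCI init sequence, then mark the device as up and
 * notify the management interface. On failure all queues and works are
 * flushed and the transport is closed again.
 */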
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}

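/* Power off a controller: flush pending work and queues, optionally
 * reset the hardware, close the transport and clear all volatile state.
 */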
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

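/* A user-initiated HCIDEVRESET: drop all queued traffic, flush the
 * inquiry cache and connection hash, and issue HCI_Reset to the
 * controller while keeping the device up.
 */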
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

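/* Handler for the legacy HCI ioctls (HCISETAUTH, HCISETSCAN, ...).
 * Note that HCISETACLMTU and HCISETSCOMTU pack the MTU and the packet
 * count into the two 16-bit halves of dr.dev_opt.
 */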
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

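/* Fill a hci_dev_list_req with the id and flags of up to dev_num
 * registered devices for the HCIGETDEVLIST ioctl.
 */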
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

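/* rfkill callback: when the switch is blocked, remember the state and
 * close the device unless it is still in setup; unblocking only clears
 * the flag, the device is not reopened here.
 */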
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

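/* Worker behind mgmt power-on and the automatic power-on at device
 * registration. Conditions that were deliberately ignored during setup
 * (RFKILL, or a BR/EDR controller without any valid address) are
 * re-checked here and force the device back off.
 */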
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

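/* Decide whether a newly created link key should be stored across
 * power cycles, based on the key type and the authentication
 * requirements both sides used during pairing.
 */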
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Both local and remote sides requested some form of bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

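/* Completion handler for the LE scan disable request. For interleaved
 * discovery the BR/EDR inquiry phase is started here once LE scanning
 * has stopped; for LE-only discovery the state machine goes straight
 * to DISCOVERY_STOPPED.
 */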
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
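
/* Typical driver usage of the registration API (illustrative sketch
 * only; the callback names and private data are hypothetical, not part
 * of this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */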

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

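/* Reassemble a partially received HCI packet. One sk_buff per packet
 * type is kept in hdev->reassembly[]; scb->expect tracks how many
 * bytes are still missing, first for the header and then for the
 * payload length the header announces. Returns the number of input
 * bytes left unconsumed, or a negative error.
 */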
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

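/* Reassembly for drivers that deliver a raw byte stream: the first
 * byte of each frame carries the packet type indicator, the rest is
 * fed through hci_reassembly() using a single shared slot.
 */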
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

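/* Hand one frame to the driver: timestamp it, copy it to the monitor
 * channel (and to raw sockets when the device is in promiscuous mode),
 * then pass it to the driver's send callback.
 */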
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

2731int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2732{
2733 struct hci_dev *hdev = req->hdev;
2734 struct sk_buff *skb;
2735 unsigned long flags;
2736
2737 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2738
Andre Guedes5d73e032013-03-08 11:20:16 -03002739	/* If an error occurred during request building, remove all HCI
2740 * commands queued on the HCI request queue.
2741 */
2742 if (req->err) {
2743 skb_queue_purge(&req->cmd_q);
2744 return req->err;
2745 }
2746
Johan Hedberg3119ae92013-03-05 20:37:44 +02002747 /* Do not allow empty requests */
2748 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002749 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002750
2751 skb = skb_peek_tail(&req->cmd_q);
2752 bt_cb(skb)->req.complete = complete;
2753
2754 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2755 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2756 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2757
2758 queue_work(hdev->workqueue, &hdev->cmd_work);
2759
2760 return 0;
2761}
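/* Illustrative use of the request API (the completion callback name is
 * hypothetical): queue one or more commands with hci_req_add() and
 * submit them atomically with hci_req_run().
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	hci_req_run(&req, my_complete);
 */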
2762
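/* Build a raw HCI command packet: a little-endian 16-bit opcode and a
 * one-byte parameter length, followed by plen bytes of parameters.
 */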
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002763static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002764 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765{
2766 int len = HCI_COMMAND_HDR_SIZE + plen;
2767 struct hci_command_hdr *hdr;
2768 struct sk_buff *skb;
2769
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002771 if (!skb)
2772 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773
2774 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002775 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 hdr->plen = plen;
2777
2778 if (plen)
2779 memcpy(skb_put(skb, plen), param, plen);
2780
2781 BT_DBG("skb len %d", skb->len);
2782
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002783 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002784
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002785 return skb;
2786}
2787
2788/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002789int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2790 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002791{
2792 struct sk_buff *skb;
2793
2794 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2795
2796 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2797 if (!skb) {
2798 BT_ERR("%s no memory for command", hdev->name);
2799 return -ENOMEM;
2800 }
2801
Johan Hedberg11714b32013-03-05 20:37:47 +02002802	/* Stand-alone HCI commands must be flagged as
2803 * single-command requests.
2804 */
2805 bt_cb(skb)->req.start = true;
2806
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002808 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809
2810 return 0;
2811}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812
Johan Hedberg71c76a12013-03-05 20:37:46 +02002813/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002814void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2815 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002816{
2817 struct hci_dev *hdev = req->hdev;
2818 struct sk_buff *skb;
2819
2820 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2821
Andre Guedes34739c12013-03-08 11:20:18 -03002822	/* If an error occurred during request building, there is no point in
2823 * queueing the HCI command. We can simply return.
2824 */
2825 if (req->err)
2826 return;
2827
Johan Hedberg71c76a12013-03-05 20:37:46 +02002828 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2829 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002830 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2831 hdev->name, opcode);
2832 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002833 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002834 }
2835
2836 if (skb_queue_empty(&req->cmd_q))
2837 bt_cb(skb)->req.start = true;
2838
Johan Hedberg02350a72013-04-03 21:50:29 +03002839 bt_cb(skb)->req.event = event;
2840
Johan Hedberg71c76a12013-03-05 20:37:46 +02002841 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002842}
2843
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002844void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2845 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002846{
2847 hci_req_add_ev(req, opcode, plen, param, 0);
2848}
2849
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002851void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852{
2853 struct hci_command_hdr *hdr;
2854
2855 if (!hdev->sent_cmd)
2856 return NULL;
2857
2858 hdr = (void *) hdev->sent_cmd->data;
2859
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002860 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 return NULL;
2862
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002863 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864
2865 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2866}
2867
2868/* Send ACL data */
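/* The 16-bit ACL handle field packs the 12-bit connection handle into
 * the low bits and the packet boundary/broadcast flags into the upper
 * four bits; hci_handle_pack() combines them before byte swapping.
 */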
2869static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2870{
2871 struct hci_acl_hdr *hdr;
2872 int len = skb->len;
2873
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002874 skb_push(skb, HCI_ACL_HDR_SIZE);
2875 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002876 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002877 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2878 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879}
2880
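/* Queue an ACL frame for transmission: the head skb goes out with the
 * caller's flags (ACL_START), while any entries on its frag_list are
 * relabelled as ACL_CONT continuation fragments and queued atomically.
 */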
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002881static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002882 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002884 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 struct hci_dev *hdev = conn->hdev;
2886 struct sk_buff *list;
2887
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002888 skb->len = skb_headlen(skb);
2889 skb->data_len = 0;
2890
2891 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002892
2893 switch (hdev->dev_type) {
2894 case HCI_BREDR:
2895 hci_add_acl_hdr(skb, conn->handle, flags);
2896 break;
2897 case HCI_AMP:
2898 hci_add_acl_hdr(skb, chan->handle, flags);
2899 break;
2900 default:
2901 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2902 return;
2903 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002904
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002905 list = skb_shinfo(skb)->frag_list;
2906 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907		/* Non-fragmented */
2908 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2909
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002910 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 } else {
2912 /* Fragmented */
2913 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2914
2915 skb_shinfo(skb)->frag_list = NULL;
2916
2917 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002918 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002920 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002921
2922 flags &= ~ACL_START;
2923 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 do {
2925 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002926
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002927 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002928 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929
2930 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2931
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002932 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 } while (list);
2934
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002935 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002937}
2938
2939void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2940{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002941 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002942
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002943 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002944
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002945 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002947 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949
2950/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002951void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952{
2953 struct hci_dev *hdev = conn->hdev;
2954 struct hci_sco_hdr hdr;
2955
2956 BT_DBG("%s len %d", hdev->name, skb->len);
2957
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002958 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 hdr.dlen = skb->len;
2960
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002961 skb_push(skb, HCI_SCO_HDR_SIZE);
2962 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002963 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002965 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002966
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002968 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970
2971/* ---- HCI TX task (outgoing data) ---- */
2972
2973/* HCI Connection scheduler */
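/* Pick the connection of the given type with the fewest unacked
 * packets that still has data queued, and grant it an equal share
 * (at least one) of the controller's free buffer slots.
 */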
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002974static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2975 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976{
2977 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002978 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002979 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002981	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002983
2984 rcu_read_lock();
2985
2986 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002987 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002989
2990 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2991 continue;
2992
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 num++;
2994
2995 if (c->sent < min) {
2996 min = c->sent;
2997 conn = c;
2998 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002999
3000 if (hci_conn_num(hdev, type) == num)
3001 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 }
3003
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003004 rcu_read_unlock();
3005
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003007 int cnt, q;
3008
3009 switch (conn->type) {
3010 case ACL_LINK:
3011 cnt = hdev->acl_cnt;
3012 break;
3013 case SCO_LINK:
3014 case ESCO_LINK:
3015 cnt = hdev->sco_cnt;
3016 break;
3017 case LE_LINK:
3018 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3019 break;
3020 default:
3021 cnt = 0;
3022 BT_ERR("Unknown link type");
3023 }
3024
3025 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 *quote = q ? q : 1;
3027 } else
3028 *quote = 0;
3029
3030 BT_DBG("conn %p quote %d", conn, *quote);
3031 return conn;
3032}
3033
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003034static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035{
3036 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003037 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038
Ville Tervobae1f5d92011-02-10 22:38:53 -03003039 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003041 rcu_read_lock();
3042
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003044 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003045 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003046 BT_ERR("%s killing stalled connection %pMR",
3047 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003048 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 }
3050 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003051
3052 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053}
3054
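/* Channel-level scheduler: only channels whose head skb carries the
 * highest priority seen are considered, and among those the one on the
 * connection with the fewest packets in flight wins. The quota is the
 * free buffer count divided across the contending channels.
 */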
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003055static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3056 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003057{
3058 struct hci_conn_hash *h = &hdev->conn_hash;
3059 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003060 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003061 struct hci_conn *conn;
3062 int cnt, q, conn_num = 0;
3063
3064 BT_DBG("%s", hdev->name);
3065
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003066 rcu_read_lock();
3067
3068 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003069 struct hci_chan *tmp;
3070
3071 if (conn->type != type)
3072 continue;
3073
3074 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3075 continue;
3076
3077 conn_num++;
3078
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003079 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003080 struct sk_buff *skb;
3081
3082 if (skb_queue_empty(&tmp->data_q))
3083 continue;
3084
3085 skb = skb_peek(&tmp->data_q);
3086 if (skb->priority < cur_prio)
3087 continue;
3088
3089 if (skb->priority > cur_prio) {
3090 num = 0;
3091 min = ~0;
3092 cur_prio = skb->priority;
3093 }
3094
3095 num++;
3096
3097 if (conn->sent < min) {
3098 min = conn->sent;
3099 chan = tmp;
3100 }
3101 }
3102
3103 if (hci_conn_num(hdev, type) == conn_num)
3104 break;
3105 }
3106
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003107 rcu_read_unlock();
3108
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003109 if (!chan)
3110 return NULL;
3111
3112 switch (chan->conn->type) {
3113 case ACL_LINK:
3114 cnt = hdev->acl_cnt;
3115 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003116 case AMP_LINK:
3117 cnt = hdev->block_cnt;
3118 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003119 case SCO_LINK:
3120 case ESCO_LINK:
3121 cnt = hdev->sco_cnt;
3122 break;
3123 case LE_LINK:
3124 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3125 break;
3126 default:
3127 cnt = 0;
3128 BT_ERR("Unknown link type");
3129 }
3130
3131 q = cnt / num;
3132 *quote = q ? q : 1;
3133 BT_DBG("chan %p quote %d", chan, *quote);
3134 return chan;
3135}
3136
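/* Anti-starvation pass run after a scheduling round: channels that
 * transmitted are reset, while channels that got no slots but still
 * have data queued get their head skb promoted to HCI_PRIO_MAX - 1 so
 * they are favoured in the next round.
 */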
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003137static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3138{
3139 struct hci_conn_hash *h = &hdev->conn_hash;
3140 struct hci_conn *conn;
3141 int num = 0;
3142
3143 BT_DBG("%s", hdev->name);
3144
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003145 rcu_read_lock();
3146
3147 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003148 struct hci_chan *chan;
3149
3150 if (conn->type != type)
3151 continue;
3152
3153 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3154 continue;
3155
3156 num++;
3157
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003158 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003159 struct sk_buff *skb;
3160
3161 if (chan->sent) {
3162 chan->sent = 0;
3163 continue;
3164 }
3165
3166 if (skb_queue_empty(&chan->data_q))
3167 continue;
3168
3169 skb = skb_peek(&chan->data_q);
3170 if (skb->priority >= HCI_PRIO_MAX - 1)
3171 continue;
3172
3173 skb->priority = HCI_PRIO_MAX - 1;
3174
3175 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003176 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003177 }
3178
3179 if (hci_conn_num(hdev, type) == num)
3180 break;
3181 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003182
3183 rcu_read_unlock();
3184
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003185}
3186
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003187static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3188{
3189 /* Calculate count of blocks used by this packet */
3190 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3191}
3192
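/* ACL transmit watchdog: if no buffer credits are left and the last
 * transmission is older than HCI_ACL_TX_TIMEOUT, the controller has
 * stopped acknowledging packets, so kill the stalled connections.
 */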
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003193static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 if (!test_bit(HCI_RAW, &hdev->flags)) {
3196		/* ACL tx timeout must be longer than the maximum
3197 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003198 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003199 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003200 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003202}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003204static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003205{
3206 unsigned int cnt = hdev->acl_cnt;
3207 struct hci_chan *chan;
3208 struct sk_buff *skb;
3209 int quote;
3210
3211 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003212
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003213 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003214 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003215 u32 priority = (skb_peek(&chan->data_q))->priority;
3216 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003217 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003218 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003219
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003220 /* Stop if priority has changed */
3221 if (skb->priority < priority)
3222 break;
3223
3224 skb = skb_dequeue(&chan->data_q);
3225
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003226 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003227 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003228
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003229 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 hdev->acl_last_tx = jiffies;
3231
3232 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003233 chan->sent++;
3234 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 }
3236 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003237
3238 if (cnt != hdev->acl_cnt)
3239 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240}
3241
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003242static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003243{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003244 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003245 struct hci_chan *chan;
3246 struct sk_buff *skb;
3247 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003248 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003249
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003250 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003251
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003252 BT_DBG("%s", hdev->name);
3253
3254 if (hdev->dev_type == HCI_AMP)
3255 type = AMP_LINK;
3256 else
3257 type = ACL_LINK;
3258
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003259 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003260 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003261 u32 priority = (skb_peek(&chan->data_q))->priority;
3262 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3263 int blocks;
3264
3265 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003266 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003267
3268 /* Stop if priority has changed */
3269 if (skb->priority < priority)
3270 break;
3271
3272 skb = skb_dequeue(&chan->data_q);
3273
3274 blocks = __get_blocks(hdev, skb);
3275 if (blocks > hdev->block_cnt)
3276 return;
3277
3278 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003279 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003280
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003281 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003282 hdev->acl_last_tx = jiffies;
3283
3284 hdev->block_cnt -= blocks;
3285 quote -= blocks;
3286
3287 chan->sent += blocks;
3288 chan->conn->sent += blocks;
3289 }
3290 }
3291
3292 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003293 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003294}
3295
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003296static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003297{
3298 BT_DBG("%s", hdev->name);
3299
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003300 /* No ACL link over BR/EDR controller */
3301 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3302 return;
3303
3304 /* No AMP link over AMP controller */
3305 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003306 return;
3307
3308 switch (hdev->flow_ctl_mode) {
3309 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3310 hci_sched_acl_pkt(hdev);
3311 break;
3312
3313 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3314 hci_sched_acl_blk(hdev);
3315 break;
3316 }
3317}
3318
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003320static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321{
3322 struct hci_conn *conn;
3323 struct sk_buff *skb;
3324 int quote;
3325
3326 BT_DBG("%s", hdev->name);
3327
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003328 if (!hci_conn_num(hdev, SCO_LINK))
3329 return;
3330
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3332 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3333 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003334 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335
3336 conn->sent++;
3337 if (conn->sent == ~0)
3338 conn->sent = 0;
3339 }
3340 }
3341}
3342
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003343static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003344{
3345 struct hci_conn *conn;
3346 struct sk_buff *skb;
3347 int quote;
3348
3349 BT_DBG("%s", hdev->name);
3350
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003351 if (!hci_conn_num(hdev, ESCO_LINK))
3352 return;
3353
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003354 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3355 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003356 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3357 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003358 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003359
3360 conn->sent++;
3361 if (conn->sent == ~0)
3362 conn->sent = 0;
3363 }
3364 }
3365}
3366
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003367static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003368{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003369 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003370 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003371 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003372
3373 BT_DBG("%s", hdev->name);
3374
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003375 if (!hci_conn_num(hdev, LE_LINK))
3376 return;
3377
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003378 if (!test_bit(HCI_RAW, &hdev->flags)) {
3379		/* LE tx timeout must be longer than the maximum
3380 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003381 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003382 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003383 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003384 }
3385
3386 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003387 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003388 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003389 u32 priority = (skb_peek(&chan->data_q))->priority;
3390 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003391 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003392 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003393
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003394 /* Stop if priority has changed */
3395 if (skb->priority < priority)
3396 break;
3397
3398 skb = skb_dequeue(&chan->data_q);
3399
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003400 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003401 hdev->le_last_tx = jiffies;
3402
3403 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003404 chan->sent++;
3405 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003406 }
3407 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003408
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003409 if (hdev->le_pkts)
3410 hdev->le_cnt = cnt;
3411 else
3412 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003413
3414 if (cnt != tmp)
3415 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003416}
3417
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003418static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003420 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 struct sk_buff *skb;
3422
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003423 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003424 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425
Marcel Holtmann52de5992013-09-03 18:08:38 -07003426 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3427 /* Schedule queues and send stuff to HCI driver */
3428 hci_sched_acl(hdev);
3429 hci_sched_sco(hdev);
3430 hci_sched_esco(hdev);
3431 hci_sched_le(hdev);
3432 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003433
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 /* Send next queued raw (unknown type) packet */
3435 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003436 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437}
3438
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003439/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440
3441/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003442static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443{
3444 struct hci_acl_hdr *hdr = (void *) skb->data;
3445 struct hci_conn *conn;
3446 __u16 handle, flags;
3447
3448 skb_pull(skb, HCI_ACL_HDR_SIZE);
3449
3450 handle = __le16_to_cpu(hdr->handle);
3451 flags = hci_flags(handle);
3452 handle = hci_handle(handle);
3453
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003454 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003455 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456
3457 hdev->stat.acl_rx++;
3458
3459 hci_dev_lock(hdev);
3460 conn = hci_conn_hash_lookup_handle(hdev, handle);
3461 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003462
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003464 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003465
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003467 l2cap_recv_acldata(conn, skb, flags);
3468 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003470 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003471 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 }
3473
3474 kfree_skb(skb);
3475}
3476
3477/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003478static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479{
3480 struct hci_sco_hdr *hdr = (void *) skb->data;
3481 struct hci_conn *conn;
3482 __u16 handle;
3483
3484 skb_pull(skb, HCI_SCO_HDR_SIZE);
3485
3486 handle = __le16_to_cpu(hdr->handle);
3487
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003488 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489
3490 hdev->stat.sco_rx++;
3491
3492 hci_dev_lock(hdev);
3493 conn = hci_conn_hash_lookup_handle(hdev, handle);
3494 hci_dev_unlock(hdev);
3495
3496 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003498 sco_recv_scodata(conn, skb);
3499 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003501 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003502 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 }
3504
3505 kfree_skb(skb);
3506}
3507
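/* RX-side request bookkeeping: the first command of every request is
 * tagged with req.start, so the current request is complete once the
 * command queue is empty or its head starts a new request.
 */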
Johan Hedberg9238f362013-03-05 20:37:48 +02003508static bool hci_req_is_complete(struct hci_dev *hdev)
3509{
3510 struct sk_buff *skb;
3511
3512 skb = skb_peek(&hdev->cmd_q);
3513 if (!skb)
3514 return true;
3515
3516 return bt_cb(skb)->req.start;
3517}
3518
Johan Hedberg42c6b122013-03-05 20:37:49 +02003519static void hci_resend_last(struct hci_dev *hdev)
3520{
3521 struct hci_command_hdr *sent;
3522 struct sk_buff *skb;
3523 u16 opcode;
3524
3525 if (!hdev->sent_cmd)
3526 return;
3527
3528 sent = (void *) hdev->sent_cmd->data;
3529 opcode = __le16_to_cpu(sent->opcode);
3530 if (opcode == HCI_OP_RESET)
3531 return;
3532
3533 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3534 if (!skb)
3535 return;
3536
3537 skb_queue_head(&hdev->cmd_q, skb);
3538 queue_work(hdev->workqueue, &hdev->cmd_work);
3539}
3540
Johan Hedberg9238f362013-03-05 20:37:48 +02003541void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3542{
3543 hci_req_complete_t req_complete = NULL;
3544 struct sk_buff *skb;
3545 unsigned long flags;
3546
3547 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3548
Johan Hedberg42c6b122013-03-05 20:37:49 +02003549 /* If the completed command doesn't match the last one that was
3550	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003551 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003552 if (!hci_sent_cmd_data(hdev, opcode)) {
3553 /* Some CSR based controllers generate a spontaneous
3554 * reset complete event during init and any pending
3555 * command will never be completed. In such a case we
3556 * need to resend whatever was the last sent
3557 * command.
3558 */
3559 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3560 hci_resend_last(hdev);
3561
Johan Hedberg9238f362013-03-05 20:37:48 +02003562 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003563 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003564
3565	/* If the command succeeded and there are still more commands in
3566 * this request the request is not yet complete.
3567 */
3568 if (!status && !hci_req_is_complete(hdev))
3569 return;
3570
3571 /* If this was the last command in a request the complete
3572 * callback would be found in hdev->sent_cmd instead of the
3573 * command queue (hdev->cmd_q).
3574 */
3575 if (hdev->sent_cmd) {
3576 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003577
3578 if (req_complete) {
3579 /* We must set the complete callback to NULL to
3580 * avoid calling the callback more than once if
3581 * this function gets called again.
3582 */
3583 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3584
Johan Hedberg9238f362013-03-05 20:37:48 +02003585 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003586 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003587 }
3588
3589 /* Remove all pending commands belonging to this request */
3590 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3591 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3592 if (bt_cb(skb)->req.start) {
3593 __skb_queue_head(&hdev->cmd_q, skb);
3594 break;
3595 }
3596
3597 req_complete = bt_cb(skb)->req.complete;
3598 kfree_skb(skb);
3599 }
3600 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3601
3602call_complete:
3603 if (req_complete)
3604 req_complete(hdev, status);
3605}
3606
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003607static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003609 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 struct sk_buff *skb;
3611
3612 BT_DBG("%s", hdev->name);
3613
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003615 /* Send copy to monitor */
3616 hci_send_to_monitor(hdev, skb);
3617
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 if (atomic_read(&hdev->promisc)) {
3619 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003620 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 }
3622
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003623 if (test_bit(HCI_RAW, &hdev->flags) ||
3624 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 kfree_skb(skb);
3626 continue;
3627 }
3628
3629 if (test_bit(HCI_INIT, &hdev->flags)) {
3630			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003631 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 case HCI_ACLDATA_PKT:
3633 case HCI_SCODATA_PKT:
3634 kfree_skb(skb);
3635 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003636 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 }
3638
3639 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003640 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003642 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 hci_event_packet(hdev, skb);
3644 break;
3645
3646 case HCI_ACLDATA_PKT:
3647 BT_DBG("%s ACL data packet", hdev->name);
3648 hci_acldata_packet(hdev, skb);
3649 break;
3650
3651 case HCI_SCODATA_PKT:
3652 BT_DBG("%s SCO data packet", hdev->name);
3653 hci_scodata_packet(hdev, skb);
3654 break;
3655
3656 default:
3657 kfree_skb(skb);
3658 break;
3659 }
3660 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661}
3662
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003663static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003665 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 struct sk_buff *skb;
3667
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003668 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3669 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003672 if (atomic_read(&hdev->cmd_cnt)) {
3673 skb = skb_dequeue(&hdev->cmd_q);
3674 if (!skb)
3675 return;
3676
Wei Yongjun7585b972009-02-25 18:29:52 +08003677 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003679 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003680 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003682 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003683 if (test_bit(HCI_RESET, &hdev->flags))
3684 del_timer(&hdev->cmd_timer);
3685 else
3686 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003687 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 } else {
3689 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003690 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691 }
3692 }
3693}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003694
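/* Map an exported LE address type (BDADDR_LE_*) to the internal
 * ADDR_LE_DEV_* constants, falling back to the random type for
 * anything unrecognized.
 */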
Andre Guedes31f79562012-04-24 21:02:53 -03003695u8 bdaddr_to_le(u8 bdaddr_type)
3696{
3697 switch (bdaddr_type) {
3698 case BDADDR_LE_PUBLIC:
3699 return ADDR_LE_DEV_PUBLIC;
3700
3701 default:
3702 /* Fallback to LE Random address type */
3703 return ADDR_LE_DEV_RANDOM;
3704 }
3705}