/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

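/* Completion callback shared by all synchronous requests: record the
 * result and wake up the task sleeping in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */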
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

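/* Retrieve the skb of the event that completed the last command and make
 * sure it is what the caller asked for: either the requested event, or a
 * Command Complete whose opcode matches the command that was sent.
 */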
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

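/* Send a single HCI command and sleep until the controller answers with
 * the expected event (or, when @event is 0, with a matching Command
 * Complete), the request is cancelled, or the timeout expires. Callers
 * are expected to serialize against each other via hci_req_lock().
 */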
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

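/* Convenience wrapper for the common Command Complete case. A sketch of
 * a typical use from a driver setup callback (illustrative only, not a
 * call site in this file):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_local_version *) skb->data;
 *	...
 *	kfree_skb(skb);
 */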
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

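/* Stage-one init for BR/EDR (and dual-mode) controllers: select
 * packet-based flow control and read the basic identity information
 * every controller must provide.
 */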
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

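/* Stage-two BR/EDR setup: read buffer sizes and identity information,
 * clear the event filters and set the connection accept timeout to
 * 0x7d00 slots (32000 * 0.625 ms, i.e. ~20 seconds).
 */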
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

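/* Pick the inquiry result mode: 0x02 = extended inquiry result,
 * 0x01 = inquiry result with RSSI, 0x00 = standard. The explicit
 * manufacturer/revision checks cover known controllers that handle
 * inquiry with RSSI without advertising the LMP feature bit.
 */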
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

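/* Page 2 of the event mask carries the Connectionless Slave Broadcast
 * (CSB) events; only the events for the roles the controller actually
 * supports are unmasked.
 */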
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

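/* Run the staged controller bring-up: stage 1 resets the controller and
 * reads its basic identity; stages 2-4 apply only to BR/EDR/LE
 * controllers and configure event masks, link policy and feature pages.
 */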
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

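/* Discovery state machine: STOPPED -> STARTING -> FINDING ->
 * (optionally) RESOLVING -> STOPPING -> STOPPED. mgmt only gets told
 * about entering FINDING and about the final transition to STOPPED.
 */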
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

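/* Add a fresh inquiry response to the cache or refresh an existing
 * entry. Returns true when the remote name is already known or being
 * resolved, false when a remote name request is still needed (or the
 * entry could not be stored).
 */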
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

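/* Handler for the HCIINQUIRY ioctl: start an inquiry if the cache is
 * stale or a flush was requested, wait for it to finish, then copy the
 * cached responses back to user space.
 */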
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

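/* Assemble the LE advertising data payload as a sequence of
 * length/type/value triplets: Flags, TX Power Level and as much of the
 * local name as still fits within HCI_MAX_AD_LENGTH bytes.
 */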
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

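/* Refresh the controller's advertising data, issuing the HCI command
 * only when the generated payload actually differs from what is set.
 */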
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

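/* Power on the controller: honour rfkill and the address requirements,
 * call the driver's open() and setup() callbacks and, unless the device
 * is marked raw or in user-channel mode, run the staged HCI init.
 */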
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

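/* Power off the controller: flush pending work, drop the queues, reset
 * the controller when the driver set HCI_QUIRK_RESET_ON_CLOSE and tell
 * mgmt that the device went down.
 */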
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    hdev->dev_type == HCI_BREDR) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
1436 return err;
1437}
1438
1439int hci_dev_reset(__u16 dev)
1440{
1441 struct hci_dev *hdev;
1442 int ret = 0;
1443
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001444 hdev = hci_dev_get(dev);
1445 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 return -ENODEV;
1447
1448 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Marcel Holtmann808a0492013-08-26 20:57:58 -07001450 if (!test_bit(HCI_UP, &hdev->flags)) {
1451 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001453 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001455 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1456 ret = -EBUSY;
1457 goto done;
1458 }
1459
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 /* Drop queues */
1461 skb_queue_purge(&hdev->rx_q);
1462 skb_queue_purge(&hdev->cmd_q);
1463
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001464 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001465 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001467 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
1469 if (hdev->flush)
1470 hdev->flush(hdev);
1471
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001472 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001473 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
1475 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001476 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 hci_req_unlock(hdev);
1480 hci_dev_put(hdev);
1481 return ret;
1482}
1483
1484int hci_dev_reset_stat(__u16 dev)
1485{
1486 struct hci_dev *hdev;
1487 int ret = 0;
1488
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001489 hdev = hci_dev_get(dev);
1490 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 return -ENODEV;
1492
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001493 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1494 ret = -EBUSY;
1495 goto done;
1496 }
1497
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1499
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001500done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 return ret;
1503}
1504
1505int hci_dev_cmd(unsigned int cmd, void __user *arg)
1506{
1507 struct hci_dev *hdev;
1508 struct hci_dev_req dr;
1509 int err = 0;
1510
1511 if (copy_from_user(&dr, arg, sizeof(dr)))
1512 return -EFAULT;
1513
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001514 hdev = hci_dev_get(dr.dev_id);
1515 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 return -ENODEV;
1517
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001518 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1519 err = -EBUSY;
1520 goto done;
1521 }
1522
Johan Hedberg56f87902013-10-02 13:43:13 +03001523 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1524 err = -EOPNOTSUPP;
1525 goto done;
1526 }
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 switch (cmd) {
1529 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001530 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1531 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 break;
1533
1534 case HCISETENCRYPT:
1535 if (!lmp_encrypt_capable(hdev)) {
1536 err = -EOPNOTSUPP;
1537 break;
1538 }
1539
1540 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1541 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001542 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1543 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 if (err)
1545 break;
1546 }
1547
Johan Hedberg01178cd2013-03-05 20:37:41 +02001548 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1549 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 break;
1551
1552 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001553 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1554 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 break;
1556
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001557 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001558 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1559 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001560 break;
1561
1562 case HCISETLINKMODE:
1563 hdev->link_mode = ((__u16) dr.dev_opt) &
1564 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1565 break;
1566
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 case HCISETPTYPE:
1568 hdev->pkt_type = (__u16) dr.dev_opt;
1569 break;
1570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001572 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1573 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 break;
1575
1576 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001577 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1578 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 break;
1580
1581 default:
1582 err = -EINVAL;
1583 break;
1584 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001585
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001586done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 hci_dev_put(hdev);
1588 return err;
1589}
1590
1591int hci_get_dev_list(void __user *arg)
1592{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001593 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 struct hci_dev_list_req *dl;
1595 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 int n = 0, size, err;
1597 __u16 dev_num;
1598
1599 if (get_user(dev_num, (__u16 __user *) arg))
1600 return -EFAULT;
1601
1602 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1603 return -EINVAL;
1604
1605 size = sizeof(*dl) + dev_num * sizeof(*dr);
1606
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001607 dl = kzalloc(size, GFP_KERNEL);
1608 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 return -ENOMEM;
1610
1611 dr = dl->dev_req;
1612
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001613 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001614 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001615 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001616 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001617
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001618 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1619 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001620
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 (dr + n)->dev_id = hdev->id;
1622 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001623
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 if (++n >= dev_num)
1625 break;
1626 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001627 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
1629 dl->dev_num = n;
1630 size = sizeof(*dl) + n * sizeof(*dr);
1631
1632 err = copy_to_user(arg, dl, size);
1633 kfree(dl);
1634
1635 return err ? -EFAULT : 0;
1636}
1637
1638int hci_get_dev_info(void __user *arg)
1639{
1640 struct hci_dev *hdev;
1641 struct hci_dev_info di;
1642 int err = 0;
1643
1644 if (copy_from_user(&di, arg, sizeof(di)))
1645 return -EFAULT;
1646
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001647 hdev = hci_dev_get(di.dev_id);
1648 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 return -ENODEV;
1650
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001651 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001652 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001653
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001654 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1655 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001656
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 strcpy(di.name, hdev->name);
1658 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001659 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 di.flags = hdev->flags;
1661 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001662 if (lmp_bredr_capable(hdev)) {
1663 di.acl_mtu = hdev->acl_mtu;
1664 di.acl_pkts = hdev->acl_pkts;
1665 di.sco_mtu = hdev->sco_mtu;
1666 di.sco_pkts = hdev->sco_pkts;
1667 } else {
1668 di.acl_mtu = hdev->le_mtu;
1669 di.acl_pkts = hdev->le_pkts;
1670 di.sco_mtu = 0;
1671 di.sco_pkts = 0;
1672 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 di.link_policy = hdev->link_policy;
1674 di.link_mode = hdev->link_mode;
1675
1676 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1677 memcpy(&di.features, &hdev->features, sizeof(di.features));
1678
1679 if (copy_to_user(arg, &di, sizeof(di)))
1680 err = -EFAULT;
1681
1682 hci_dev_put(hdev);
1683
1684 return err;
1685}
1686
1687/* ---- Interface to HCI drivers ---- */
1688
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001689static int hci_rfkill_set_block(void *data, bool blocked)
1690{
1691 struct hci_dev *hdev = data;
1692
1693 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1694
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001695 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1696 return -EBUSY;
1697
Johan Hedberg5e130362013-09-13 08:58:17 +03001698 if (blocked) {
1699 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001700 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1701 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001702 } else {
1703 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001704 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001705
1706 return 0;
1707}
1708
1709static const struct rfkill_ops hci_rfkill_ops = {
1710 .set_block = hci_rfkill_set_block,
1711};
1712
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001713static void hci_power_on(struct work_struct *work)
1714{
1715 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001716 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001717
1718 BT_DBG("%s", hdev->name);
1719
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001720 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001721 if (err < 0) {
1722 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001723 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001724 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001725
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001726 /* During the HCI setup phase, a few error conditions are
1727 * ignored and they need to be checked now. If they are still
1728 * valid, it is important to turn the device back off.
1729 */
1730 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1731 (hdev->dev_type == HCI_BREDR &&
1732 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1733 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03001734 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1735 hci_dev_do_close(hdev);
1736 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001737 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1738 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001739 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001740
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001741 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001742 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001743}
1744
1745static void hci_power_off(struct work_struct *work)
1746{
Johan Hedberg32435532011-11-07 22:16:04 +02001747 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001748 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001749
1750 BT_DBG("%s", hdev->name);
1751
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001752 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001753}
1754
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001755static void hci_discov_off(struct work_struct *work)
1756{
1757 struct hci_dev *hdev;
1758 u8 scan = SCAN_PAGE;
1759
1760 hdev = container_of(work, struct hci_dev, discov_off.work);
1761
1762 BT_DBG("%s", hdev->name);
1763
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001764 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001765
1766 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1767
1768 hdev->discov_timeout = 0;
1769
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001770 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001771}
1772
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001773int hci_uuids_clear(struct hci_dev *hdev)
1774{
Johan Hedberg48210022013-01-27 00:31:28 +02001775 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001776
Johan Hedberg48210022013-01-27 00:31:28 +02001777 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1778 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001779 kfree(uuid);
1780 }
1781
1782 return 0;
1783}
1784
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001785int hci_link_keys_clear(struct hci_dev *hdev)
1786{
1787 struct list_head *p, *n;
1788
1789 list_for_each_safe(p, n, &hdev->link_keys) {
1790 struct link_key *key;
1791
1792 key = list_entry(p, struct link_key, list);
1793
1794 list_del(p);
1795 kfree(key);
1796 }
1797
1798 return 0;
1799}
1800
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001801int hci_smp_ltks_clear(struct hci_dev *hdev)
1802{
1803 struct smp_ltk *k, *tmp;
1804
1805 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1806 list_del(&k->list);
1807 kfree(k);
1808 }
1809
1810 return 0;
1811}
1812
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001813struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1814{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001815 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001816
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001817 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001818 if (bacmp(bdaddr, &k->bdaddr) == 0)
1819 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001820
1821 return NULL;
1822}
1823
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301824static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001825 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001826{
1827 /* Legacy key */
1828 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301829 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001830
1831 /* Debug keys are insecure so don't store them persistently */
1832 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301833 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001834
1835 /* Changed combination key and there's no previous one */
1836 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301837 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001838
1839 /* Security mode 3 case */
1840 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301841 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001842
1843 /* Neither local nor remote side had no-bonding as requirement */
1844 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301845 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001846
1847 /* Local side had dedicated bonding as requirement */
1848 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301849 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001850
1851 /* Remote side had dedicated bonding as requirement */
1852 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301853 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001854
1855 /* If none of the above criteria match, then don't store the key
1856 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301857 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001858}
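
/* Worked example (illustrative; the value meanings are assumptions based
 * on the SSP authentication requirement encoding, not stated in this file):
 * an SSP pairing where both sides asked for some form of bonding
 * (auth_type and remote_auth above 0x01, i.e. dedicated or general
 * bonding) passes the "neither side requested no-bonding" test and the
 * key is stored persistently. If both peers requested no-bonding (0x00
 * or 0x01) and neither dedicated-bonding check matches, execution falls
 * through to the final return and the key is dropped with the connection.
 */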
1859
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001860struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001861{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001862 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001863
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001864 list_for_each_entry(k, &hdev->long_term_keys, list) {
1865 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001866 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001867 continue;
1868
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001869 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001870 }
1871
1872 return NULL;
1873}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001874
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001875struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001876 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001877{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001878 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001879
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001880 list_for_each_entry(k, &hdev->long_term_keys, list)
1881 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001882 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001883 return k;
1884
1885 return NULL;
1886}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001887
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001888int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001889 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001890{
1891 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301892 u8 old_key_type;
1893 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001894
1895 old_key = hci_find_link_key(hdev, bdaddr);
1896 if (old_key) {
1897 old_key_type = old_key->type;
1898 key = old_key;
1899 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001900 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001901 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1902 if (!key)
1903 return -ENOMEM;
1904 list_add(&key->list, &hdev->link_keys);
1905 }
1906
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001907 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001908
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001909 /* Some buggy controller combinations generate a changed
1910 * combination key for legacy pairing even when there's no
1911 * previous key */
1912 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001913 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001914 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001915 if (conn)
1916 conn->key_type = type;
1917 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001918
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001919 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001920 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001921 key->pin_len = pin_len;
1922
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001923 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001924 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001925 else
1926 key->type = type;
1927
Johan Hedberg4df378a2011-04-28 11:29:03 -07001928 if (!new_key)
1929 return 0;
1930
1931 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1932
Johan Hedberg744cf192011-11-08 20:40:14 +02001933 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001934
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301935 if (conn)
1936 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001937
1938 return 0;
1939}
1940
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001941int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001942 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001943 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001944{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001945 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001946
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001947 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1948 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001949
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001950 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1951 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001952 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001953 else {
1954 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001955 if (!key)
1956 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001957 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001958 }
1959
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001960 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001961 key->bdaddr_type = addr_type;
1962 memcpy(key->val, tk, sizeof(key->val));
1963 key->authenticated = authenticated;
1964 key->ediv = ediv;
1965 key->enc_size = enc_size;
1966 key->type = type;
1967 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001968
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001969 if (!new_key)
1970 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001971
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001972 if (type & HCI_SMP_LTK)
1973 mgmt_new_ltk(hdev, key, 1);
1974
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001975 return 0;
1976}
1977
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001978int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1979{
1980 struct link_key *key;
1981
1982 key = hci_find_link_key(hdev, bdaddr);
1983 if (!key)
1984 return -ENOENT;
1985
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001986 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001987
1988 list_del(&key->list);
1989 kfree(key);
1990
1991 return 0;
1992}
1993
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001994int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1995{
1996 struct smp_ltk *k, *tmp;
1997
1998 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1999 if (bacmp(bdaddr, &k->bdaddr))
2000 continue;
2001
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002002 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002003
2004 list_del(&k->list);
2005 kfree(k);
2006 }
2007
2008 return 0;
2009}
2010
Ville Tervo6bd32322011-02-16 16:32:41 +02002011/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002012static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002013{
2014 struct hci_dev *hdev = (void *) arg;
2015
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002016 if (hdev->sent_cmd) {
2017 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2018 u16 opcode = __le16_to_cpu(sent->opcode);
2019
2020 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2021 } else {
2022 BT_ERR("%s command tx timeout", hdev->name);
2023 }
2024
Ville Tervo6bd32322011-02-16 16:32:41 +02002025 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002026 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002027}
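
#if 0	/* Illustrative sketch, not compiled: the command work elsewhere in
	 * this file is assumed to re-arm this watchdog each time a command
	 * is handed to the driver, roughly as below, so that a controller
	 * that never responds eventually triggers hci_cmd_timeout() above.
	 */
static void example_arm_cmd_timer(struct hci_dev *hdev)
{
	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
}
#endif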
2028
Szymon Janc2763eda2011-03-22 13:12:22 +01002029struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002030 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002031{
2032 struct oob_data *data;
2033
2034 list_for_each_entry(data, &hdev->remote_oob_data, list)
2035 if (bacmp(bdaddr, &data->bdaddr) == 0)
2036 return data;
2037
2038 return NULL;
2039}
2040
2041int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2042{
2043 struct oob_data *data;
2044
2045 data = hci_find_remote_oob_data(hdev, bdaddr);
2046 if (!data)
2047 return -ENOENT;
2048
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002049 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002050
2051 list_del(&data->list);
2052 kfree(data);
2053
2054 return 0;
2055}
2056
2057int hci_remote_oob_data_clear(struct hci_dev *hdev)
2058{
2059 struct oob_data *data, *n;
2060
2061 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2062 list_del(&data->list);
2063 kfree(data);
2064 }
2065
2066 return 0;
2067}
2068
2069int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002070 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002071{
2072 struct oob_data *data;
2073
2074 data = hci_find_remote_oob_data(hdev, bdaddr);
2075
2076 if (!data) {
2077 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2078 if (!data)
2079 return -ENOMEM;
2080
2081 bacpy(&data->bdaddr, bdaddr);
2082 list_add(&data->list, &hdev->remote_oob_data);
2083 }
2084
2085 memcpy(data->hash, hash, sizeof(data->hash));
2086 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2087
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002088 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002089
2090 return 0;
2091}
2092
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002093struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002094{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002095 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002096
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002097 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002098 if (bacmp(bdaddr, &b->bdaddr) == 0)
2099 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002100
2101 return NULL;
2102}
2103
2104int hci_blacklist_clear(struct hci_dev *hdev)
2105{
2106 struct list_head *p, *n;
2107
2108 list_for_each_safe(p, n, &hdev->blacklist) {
2109 struct bdaddr_list *b;
2110
2111 b = list_entry(p, struct bdaddr_list, list);
2112
2113 list_del(p);
2114 kfree(b);
2115 }
2116
2117 return 0;
2118}
2119
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002120int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002121{
2122 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002123
2124 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2125 return -EBADF;
2126
Antti Julku5e762442011-08-25 16:48:02 +03002127 if (hci_blacklist_lookup(hdev, bdaddr))
2128 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002129
2130 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002131 if (!entry)
2132 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002133
2134 bacpy(&entry->bdaddr, bdaddr);
2135
2136 list_add(&entry->list, &hdev->blacklist);
2137
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002138 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002139}
2140
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002141int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002142{
2143 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002144
Szymon Janc1ec918c2011-11-16 09:32:21 +01002145 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002146 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002147
2148 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002149 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002150 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002151
2152 list_del(&entry->list);
2153 kfree(entry);
2154
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002155 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002156}
2157
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002158static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002159{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002160 if (status) {
2161 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002162
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002163 hci_dev_lock(hdev);
2164 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2165 hci_dev_unlock(hdev);
2166 return;
2167 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002168}
2169
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002170static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002171{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002172 /* General inquiry access code (GIAC) */
2173 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2174 struct hci_request req;
2175 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002176 int err;
2177
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002178 if (status) {
2179 BT_ERR("Failed to disable LE scanning: status %d", status);
2180 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002181 }
2182
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002183 switch (hdev->discovery.type) {
2184 case DISCOV_TYPE_LE:
2185 hci_dev_lock(hdev);
2186 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2187 hci_dev_unlock(hdev);
2188 break;
2189
2190 case DISCOV_TYPE_INTERLEAVED:
2191 hci_req_init(&req, hdev);
2192
2193 memset(&cp, 0, sizeof(cp));
2194 memcpy(&cp.lap, lap, sizeof(cp.lap));
2195 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2196 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2197
2198 hci_dev_lock(hdev);
2199
2200 hci_inquiry_cache_flush(hdev);
2201
2202 err = hci_req_run(&req, inquiry_complete);
2203 if (err) {
2204 BT_ERR("Inquiry request failed: err %d", err);
2205 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2206 }
2207
2208 hci_dev_unlock(hdev);
2209 break;
2210 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002211}
2212
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002213static void le_scan_disable_work(struct work_struct *work)
2214{
2215 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002216 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002217 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002218 struct hci_request req;
2219 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002220
2221 BT_DBG("%s", hdev->name);
2222
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002223 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002224
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002225 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002226 cp.enable = LE_SCAN_DISABLE;
2227 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002228
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002229 err = hci_req_run(&req, le_scan_disable_work_complete);
2230 if (err)
2231 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002232}
2233
David Herrmann9be0dab2012-04-22 14:39:57 +02002234/* Alloc HCI device */
2235struct hci_dev *hci_alloc_dev(void)
2236{
2237 struct hci_dev *hdev;
2238
2239 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2240 if (!hdev)
2241 return NULL;
2242
David Herrmannb1b813d2012-04-22 14:39:58 +02002243 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2244 hdev->esco_type = (ESCO_HV1);
2245 hdev->link_mode = (HCI_LM_ACCEPT);
2246 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002247 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2248 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002249
David Herrmannb1b813d2012-04-22 14:39:58 +02002250 hdev->sniff_max_interval = 800;
2251 hdev->sniff_min_interval = 80;
2252
2253 mutex_init(&hdev->lock);
2254 mutex_init(&hdev->req_lock);
2255
2256 INIT_LIST_HEAD(&hdev->mgmt_pending);
2257 INIT_LIST_HEAD(&hdev->blacklist);
2258 INIT_LIST_HEAD(&hdev->uuids);
2259 INIT_LIST_HEAD(&hdev->link_keys);
2260 INIT_LIST_HEAD(&hdev->long_term_keys);
2261 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002262 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002263
2264 INIT_WORK(&hdev->rx_work, hci_rx_work);
2265 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2266 INIT_WORK(&hdev->tx_work, hci_tx_work);
2267 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002268
David Herrmannb1b813d2012-04-22 14:39:58 +02002269 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2270 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2271 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2272
David Herrmannb1b813d2012-04-22 14:39:58 +02002273 skb_queue_head_init(&hdev->rx_q);
2274 skb_queue_head_init(&hdev->cmd_q);
2275 skb_queue_head_init(&hdev->raw_q);
2276
2277 init_waitqueue_head(&hdev->req_wait_q);
2278
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002279 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002280
David Herrmannb1b813d2012-04-22 14:39:58 +02002281 hci_init_sysfs(hdev);
2282 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002283
2284 return hdev;
2285}
2286EXPORT_SYMBOL(hci_alloc_dev);
2287
2288/* Free HCI device */
2289void hci_free_dev(struct hci_dev *hdev)
2290{
David Herrmann9be0dab2012-04-22 14:39:57 +02002291 /* will free via device release */
2292 put_device(&hdev->dev);
2293}
2294EXPORT_SYMBOL(hci_free_dev);
2295
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296/* Register HCI device */
2297int hci_register_dev(struct hci_dev *hdev)
2298{
David Herrmannb1b813d2012-04-22 14:39:58 +02002299 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
David Herrmann010666a2012-01-07 15:47:07 +01002301 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 return -EINVAL;
2303
Mat Martineau08add512011-11-02 16:18:36 -07002304 /* Do not allow HCI_AMP devices to register at index 0,
2305 * so the index can be used as the AMP controller ID.
2306 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002307 switch (hdev->dev_type) {
2308 case HCI_BREDR:
2309 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2310 break;
2311 case HCI_AMP:
2312 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2313 break;
2314 default:
2315 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002317
Sasha Levin3df92b32012-05-27 22:36:56 +02002318 if (id < 0)
2319 return id;
2320
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 sprintf(hdev->name, "hci%d", id);
2322 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002323
2324 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2325
Kees Cookd8537542013-07-03 15:04:57 -07002326 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2327 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002328 if (!hdev->workqueue) {
2329 error = -ENOMEM;
2330 goto err;
2331 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002332
Kees Cookd8537542013-07-03 15:04:57 -07002333 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2334 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002335 if (!hdev->req_workqueue) {
2336 destroy_workqueue(hdev->workqueue);
2337 error = -ENOMEM;
2338 goto err;
2339 }
2340
David Herrmann33ca9542011-10-08 14:58:49 +02002341 error = hci_add_sysfs(hdev);
2342 if (error < 0)
2343 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002345 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002346 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2347 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002348 if (hdev->rfkill) {
2349 if (rfkill_register(hdev->rfkill) < 0) {
2350 rfkill_destroy(hdev->rfkill);
2351 hdev->rfkill = NULL;
2352 }
2353 }
2354
Johan Hedberg5e130362013-09-13 08:58:17 +03002355 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2356 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2357
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002358 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002359
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002360 if (hdev->dev_type == HCI_BREDR) {
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002361 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Johan Hedberg56f87902013-10-02 13:43:13 +03002362 /* Assume BR/EDR support until proven otherwise (such as
2363 * through reading supported features during init).
2364 */
2365 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2366 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002367
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002368 write_lock(&hci_dev_list_lock);
2369 list_add(&hdev->list, &hci_dev_list);
2370 write_unlock(&hci_dev_list_lock);
2371
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002373 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374
Johan Hedberg19202572013-01-14 22:33:51 +02002375 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002376
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002378
David Herrmann33ca9542011-10-08 14:58:49 +02002379err_wqueue:
2380 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002381 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002382err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002383 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002384
David Herrmann33ca9542011-10-08 14:58:49 +02002385 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386}
2387EXPORT_SYMBOL(hci_register_dev);
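
#if 0	/* Illustrative driver skeleton, not compiled: minimal usage of
	 * hci_alloc_dev()/hci_register_dev() as implied by the checks
	 * above. example_open, example_close and example_send are
	 * hypothetical driver callbacks; open and close are mandatory,
	 * and dev_type decides the allowed index range (HCI_AMP devices
	 * never get index 0).
	 */
static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->dev_type = HCI_BREDR;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
#endif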
2388
2389/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002390void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391{
Sasha Levin3df92b32012-05-27 22:36:56 +02002392 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002393
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002394 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395
Johan Hovold94324962012-03-15 14:48:41 +01002396 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2397
Sasha Levin3df92b32012-05-27 22:36:56 +02002398 id = hdev->id;
2399
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002400 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002402 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403
2404 hci_dev_do_close(hdev);
2405
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302406 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002407 kfree_skb(hdev->reassembly[i]);
2408
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002409 cancel_work_sync(&hdev->power_on);
2410
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002411 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002412 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002413 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002414 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002415 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002416 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002417
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002418 /* mgmt_index_removed should take care of emptying the
2419 * pending list */
2420 BUG_ON(!list_empty(&hdev->mgmt_pending));
2421
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 hci_notify(hdev, HCI_DEV_UNREG);
2423
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002424 if (hdev->rfkill) {
2425 rfkill_unregister(hdev->rfkill);
2426 rfkill_destroy(hdev->rfkill);
2427 }
2428
David Herrmannce242972011-10-08 14:58:48 +02002429 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002430
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002431 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002432 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002433
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002434 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002435 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002436 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002437 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002438 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002439 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002440 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002441
David Herrmanndc946bd2012-01-07 15:47:24 +01002442 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002443
2444 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445}
2446EXPORT_SYMBOL(hci_unregister_dev);
2447
2448/* Suspend HCI device */
2449int hci_suspend_dev(struct hci_dev *hdev)
2450{
2451 hci_notify(hdev, HCI_DEV_SUSPEND);
2452 return 0;
2453}
2454EXPORT_SYMBOL(hci_suspend_dev);
2455
2456/* Resume HCI device */
2457int hci_resume_dev(struct hci_dev *hdev)
2458{
2459 hci_notify(hdev, HCI_DEV_RESUME);
2460 return 0;
2461}
2462EXPORT_SYMBOL(hci_resume_dev);
2463
Marcel Holtmann76bca882009-11-18 00:40:39 +01002464/* Receive frame from HCI drivers */
2465int hci_recv_frame(struct sk_buff *skb)
2466{
2467 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2468 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002469 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002470 kfree_skb(skb);
2471 return -ENXIO;
2472 }
2473
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002474 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002475 bt_cb(skb)->incoming = 1;
2476
2477 /* Time stamp */
2478 __net_timestamp(skb);
2479
Marcel Holtmann76bca882009-11-18 00:40:39 +01002480 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002481 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002482
Marcel Holtmann76bca882009-11-18 00:40:39 +01002483 return 0;
2484}
2485EXPORT_SYMBOL(hci_recv_frame);
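
#if 0	/* Illustrative driver-side sketch, not compiled: tag an skb with
	 * its packet type and owning device before handing it to the core,
	 * mirroring what hci_reassembly() below does for fragmented input.
	 * In this kernel the device travels in skb->dev.
	 */
static int example_deliver_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	return hci_recv_frame(skb);
}
#endif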
2486
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302487static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002488 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302489{
2490 int len = 0;
2491 int hlen = 0;
2492 int remain = count;
2493 struct sk_buff *skb;
2494 struct bt_skb_cb *scb;
2495
2496 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002497 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302498 return -EILSEQ;
2499
2500 skb = hdev->reassembly[index];
2501
2502 if (!skb) {
2503 switch (type) {
2504 case HCI_ACLDATA_PKT:
2505 len = HCI_MAX_FRAME_SIZE;
2506 hlen = HCI_ACL_HDR_SIZE;
2507 break;
2508 case HCI_EVENT_PKT:
2509 len = HCI_MAX_EVENT_SIZE;
2510 hlen = HCI_EVENT_HDR_SIZE;
2511 break;
2512 case HCI_SCODATA_PKT:
2513 len = HCI_MAX_SCO_SIZE;
2514 hlen = HCI_SCO_HDR_SIZE;
2515 break;
2516 }
2517
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002518 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302519 if (!skb)
2520 return -ENOMEM;
2521
2522 scb = (void *) skb->cb;
2523 scb->expect = hlen;
2524 scb->pkt_type = type;
2525
2526 skb->dev = (void *) hdev;
2527 hdev->reassembly[index] = skb;
2528 }
2529
2530 while (count) {
2531 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002532 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302533
2534 memcpy(skb_put(skb, len), data, len);
2535
2536 count -= len;
2537 data += len;
2538 scb->expect -= len;
2539 remain = count;
2540
2541 switch (type) {
2542 case HCI_EVENT_PKT:
2543 if (skb->len == HCI_EVENT_HDR_SIZE) {
2544 struct hci_event_hdr *h = hci_event_hdr(skb);
2545 scb->expect = h->plen;
2546
2547 if (skb_tailroom(skb) < scb->expect) {
2548 kfree_skb(skb);
2549 hdev->reassembly[index] = NULL;
2550 return -ENOMEM;
2551 }
2552 }
2553 break;
2554
2555 case HCI_ACLDATA_PKT:
2556 if (skb->len == HCI_ACL_HDR_SIZE) {
2557 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2558 scb->expect = __le16_to_cpu(h->dlen);
2559
2560 if (skb_tailroom(skb) < scb->expect) {
2561 kfree_skb(skb);
2562 hdev->reassembly[index] = NULL;
2563 return -ENOMEM;
2564 }
2565 }
2566 break;
2567
2568 case HCI_SCODATA_PKT:
2569 if (skb->len == HCI_SCO_HDR_SIZE) {
2570 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2571 scb->expect = h->dlen;
2572
2573 if (skb_tailroom(skb) < scb->expect) {
2574 kfree_skb(skb);
2575 hdev->reassembly[index] = NULL;
2576 return -ENOMEM;
2577 }
2578 }
2579 break;
2580 }
2581
2582 if (scb->expect == 0) {
2583 /* Complete frame */
2584
2585 bt_cb(skb)->pkt_type = type;
2586 hci_recv_frame(skb);
2587
2588 hdev->reassembly[index] = NULL;
2589 return remain;
2590 }
2591 }
2592
2593 return remain;
2594}
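
/* Worked example (illustrative): an HCI event arriving in two chunks
 * first fills the event header; once skb->len equals HCI_EVENT_HDR_SIZE,
 * scb->expect is reset to hdr->plen, later chunks append parameter bytes,
 * and when expect reaches zero the completed frame is pushed through
 * hci_recv_frame() and the reassembly slot is cleared. A positive return
 * value is the number of bytes left over that belong to the next packet.
 */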
2595
Marcel Holtmannef222012007-07-11 06:42:04 +02002596int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2597{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302598 int rem = 0;
2599
Marcel Holtmannef222012007-07-11 06:42:04 +02002600 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2601 return -EILSEQ;
2602
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002603 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002604 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302605 if (rem < 0)
2606 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002607
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302608 data += (count - rem);
2609 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002610 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002611
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302612 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002613}
2614EXPORT_SYMBOL(hci_recv_fragment);
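
#if 0	/* Illustrative usage sketch, not compiled: a driver feeding a
	 * buffer of known packet type into the reassembler. A negative
	 * return is an error; 0 means every byte was consumed.
	 */
static int example_driver_rx(struct hci_dev *hdev, void *buf, int len)
{
	return hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
}
#endif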
2615
Suraj Sumangala99811512010-07-14 13:02:19 +05302616#define STREAM_REASSEMBLY 0
2617
2618int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2619{
2620 int type;
2621 int rem = 0;
2622
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002623 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302624 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2625
2626 if (!skb) {
2627 struct { char type; } *pkt;
2628
2629 /* Start of the frame */
2630 pkt = data;
2631 type = pkt->type;
2632
2633 data++;
2634 count--;
2635 } else
2636 type = bt_cb(skb)->pkt_type;
2637
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002638 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002639 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302640 if (rem < 0)
2641 return rem;
2642
2643 data += (count - rem);
2644 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002645 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302646
2647 return rem;
2648}
2649EXPORT_SYMBOL(hci_recv_stream_fragment);
2650
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651/* ---- Interface to upper protocols ---- */
2652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653int hci_register_cb(struct hci_cb *cb)
2654{
2655 BT_DBG("%p name %s", cb, cb->name);
2656
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002657 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002659 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660
2661 return 0;
2662}
2663EXPORT_SYMBOL(hci_register_cb);
2664
2665int hci_unregister_cb(struct hci_cb *cb)
2666{
2667 BT_DBG("%p name %s", cb, cb->name);
2668
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002669 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002671 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
2673 return 0;
2674}
2675EXPORT_SYMBOL(hci_unregister_cb);
2676
2677static int hci_send_frame(struct sk_buff *skb)
2678{
2679 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2680
2681 if (!hdev) {
2682 kfree_skb(skb);
2683 return -ENODEV;
2684 }
2685
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002686 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002688 /* Time stamp */
2689 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002691 /* Send copy to monitor */
2692 hci_send_to_monitor(hdev, skb);
2693
2694 if (atomic_read(&hdev->promisc)) {
2695 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002696 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 }
2698
2699 /* Get rid of skb owner, prior to sending to the driver. */
2700 skb_orphan(skb);
2701
2702 return hdev->send(skb);
2703}
2704
Johan Hedberg3119ae92013-03-05 20:37:44 +02002705void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2706{
2707 skb_queue_head_init(&req->cmd_q);
2708 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002709 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002710}
2711
2712int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2713{
2714 struct hci_dev *hdev = req->hdev;
2715 struct sk_buff *skb;
2716 unsigned long flags;
2717
2718 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2719
Andre Guedes5d73e032013-03-08 11:20:16 -03002720 /* If an error occurred during request building, remove all HCI
2721 * commands queued on the HCI request queue.
2722 */
2723 if (req->err) {
2724 skb_queue_purge(&req->cmd_q);
2725 return req->err;
2726 }
2727
Johan Hedberg3119ae92013-03-05 20:37:44 +02002728 /* Do not allow empty requests */
2729 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002730 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002731
2732 skb = skb_peek_tail(&req->cmd_q);
2733 bt_cb(skb)->req.complete = complete;
2734
2735 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2736 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2737 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2738
2739 queue_work(hdev->workqueue, &hdev->cmd_work);
2740
2741 return 0;
2742}
2743
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002744static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002745 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746{
2747 int len = HCI_COMMAND_HDR_SIZE + plen;
2748 struct hci_command_hdr *hdr;
2749 struct sk_buff *skb;
2750
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002752 if (!skb)
2753 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754
2755 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002756 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 hdr->plen = plen;
2758
2759 if (plen)
2760 memcpy(skb_put(skb, plen), param, plen);
2761
2762 BT_DBG("skb len %d", skb->len);
2763
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002764 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002766
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002767 return skb;
2768}
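
/* The skb built here mirrors the wire format of an HCI command packet:
 * a little-endian 16-bit opcode, a one-byte parameter length and plen
 * parameter bytes, with bt_cb(skb)->pkt_type set to HCI_COMMAND_PKT for
 * the driver.
 */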
2769
2770/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002771int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2772 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002773{
2774 struct sk_buff *skb;
2775
2776 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2777
2778 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2779 if (!skb) {
2780 BT_ERR("%s no memory for command", hdev->name);
2781 return -ENOMEM;
2782 }
2783
Johan Hedberg11714b32013-03-05 20:37:47 +02002784	/* Stand-alone HCI commands must be flagged as
2785 * single-command requests.
2786 */
2787 bt_cb(skb)->req.start = true;
2788
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002790 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791
2792 return 0;
2793}
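
/* Illustrative usage: a stand-alone command with no parameters goes
 * straight onto the device command queue.
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */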
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794
Johan Hedberg71c76a12013-03-05 20:37:46 +02002795/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002796void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2797 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002798{
2799 struct hci_dev *hdev = req->hdev;
2800 struct sk_buff *skb;
2801
2802 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2803
Andre Guedes34739c12013-03-08 11:20:18 -03002804	/* If an error occurred during request building, there is no point in
2805 * queueing the HCI command. We can simply return.
2806 */
2807 if (req->err)
2808 return;
2809
Johan Hedberg71c76a12013-03-05 20:37:46 +02002810 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2811 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002812 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2813 hdev->name, opcode);
2814 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002815 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002816 }
2817
2818 if (skb_queue_empty(&req->cmd_q))
2819 bt_cb(skb)->req.start = true;
2820
Johan Hedberg02350a72013-04-03 21:50:29 +03002821 bt_cb(skb)->req.event = event;
2822
Johan Hedberg71c76a12013-03-05 20:37:46 +02002823 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002824}
2825
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002826void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2827 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002828{
2829 hci_req_add_ev(req, opcode, plen, param, 0);
2830}
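
/* hci_req_add_ev() records in bt_cb(skb)->req.event which event is
 * expected to complete the command, letting the event code match on
 * something other than the usual Command Complete/Command Status pair;
 * hci_req_add() is the common case with no special completion event.
 */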
2831
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002833void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834{
2835 struct hci_command_hdr *hdr;
2836
2837 if (!hdev->sent_cmd)
2838 return NULL;
2839
2840 hdr = (void *) hdev->sent_cmd->data;
2841
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002842 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 return NULL;
2844
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002845 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846
2847 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2848}
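
/* Event handlers use this to recover the parameters of the command that
 * a Command Complete event refers to, since the event itself does not
 * echo them back.
 */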
2849
2850/* Send ACL data */
2851static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2852{
2853 struct hci_acl_hdr *hdr;
2854 int len = skb->len;
2855
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002856 skb_push(skb, HCI_ACL_HDR_SIZE);
2857 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002858 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002859 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2860 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861}
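
/* The 12-bit connection handle and 4-bit packet boundary/broadcast flags
 * share one 16-bit field; e.g. assuming the ACL_START flag value 0x02
 * from hci.h, hci_handle_pack(0x002a, ACL_START) yields 0x202a, stored
 * little-endian on the wire.
 */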
2862
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002863static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002864 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002866 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 struct hci_dev *hdev = conn->hdev;
2868 struct sk_buff *list;
2869
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002870 skb->len = skb_headlen(skb);
2871 skb->data_len = 0;
2872
2873 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002874
2875 switch (hdev->dev_type) {
2876 case HCI_BREDR:
2877 hci_add_acl_hdr(skb, conn->handle, flags);
2878 break;
2879 case HCI_AMP:
2880 hci_add_acl_hdr(skb, chan->handle, flags);
2881 break;
2882 default:
2883 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2884 return;
2885 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002886
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002887 list = skb_shinfo(skb)->frag_list;
2888 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889		/* Non-fragmented */
2890 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2891
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002892 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 } else {
2894 /* Fragmented */
2895 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2896
2897 skb_shinfo(skb)->frag_list = NULL;
2898
2899 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002900 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002902 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002903
2904 flags &= ~ACL_START;
2905 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 do {
2907 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002908
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002910 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002911 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
2913 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2914
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002915 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 } while (list);
2917
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002918 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002920}
2921
2922void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2923{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002924 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002925
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002926 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002927
2928 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002929
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002930 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002932 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934
2935/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002936void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937{
2938 struct hci_dev *hdev = conn->hdev;
2939 struct hci_sco_hdr hdr;
2940
2941 BT_DBG("%s len %d", hdev->name, skb->len);
2942
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002943 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 hdr.dlen = skb->len;
2945
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002946 skb_push(skb, HCI_SCO_HDR_SIZE);
2947 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002948 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949
2950 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002951 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002952
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002954 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955}
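
/* SCO frames are never fragmented: the three-byte SCO header (16-bit
 * handle plus one-byte length) is pushed in front of the payload and the
 * skb is queued for the TX work like any other packet.
 */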
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956
2957/* ---- HCI TX task (outgoing data) ---- */
2958
2959/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002960static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2961 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962{
2963 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002964 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002965 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002967	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002969
2970 rcu_read_lock();
2971
2972 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002973 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002975
2976 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2977 continue;
2978
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 num++;
2980
2981 if (c->sent < min) {
2982 min = c->sent;
2983 conn = c;
2984 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002985
2986 if (hci_conn_num(hdev, type) == num)
2987 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 }
2989
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002990 rcu_read_unlock();
2991
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002993 int cnt, q;
2994
2995 switch (conn->type) {
2996 case ACL_LINK:
2997 cnt = hdev->acl_cnt;
2998 break;
2999 case SCO_LINK:
3000 case ESCO_LINK:
3001 cnt = hdev->sco_cnt;
3002 break;
3003 case LE_LINK:
3004 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3005 break;
3006 default:
3007 cnt = 0;
3008 BT_ERR("Unknown link type");
3009 }
3010
3011 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 *quote = q ? q : 1;
3013 } else
3014 *quote = 0;
3015
3016 BT_DBG("conn %p quote %d", conn, *quote);
3017 return conn;
3018}
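
/* The connection with the fewest packets in flight wins and the
 * controller buffer count is split evenly: e.g. with hdev->acl_cnt == 8
 * and three busy ACL connections, each round gets a quote of
 * 8 / 3 = 2 packets (never less than 1).
 */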
3019
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003020static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021{
3022 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003023 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024
Ville Tervobae1f5d92011-02-10 22:38:53 -03003025 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003027 rcu_read_lock();
3028
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003030 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003031 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003032 BT_ERR("%s killing stalled connection %pMR",
3033 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003034 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035 }
3036 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003037
3038 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039}
3040
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003041static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3042 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003043{
3044 struct hci_conn_hash *h = &hdev->conn_hash;
3045 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003046 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003047 struct hci_conn *conn;
3048 int cnt, q, conn_num = 0;
3049
3050 BT_DBG("%s", hdev->name);
3051
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003052 rcu_read_lock();
3053
3054 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003055 struct hci_chan *tmp;
3056
3057 if (conn->type != type)
3058 continue;
3059
3060 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3061 continue;
3062
3063 conn_num++;
3064
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003065 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003066 struct sk_buff *skb;
3067
3068 if (skb_queue_empty(&tmp->data_q))
3069 continue;
3070
3071 skb = skb_peek(&tmp->data_q);
3072 if (skb->priority < cur_prio)
3073 continue;
3074
3075 if (skb->priority > cur_prio) {
3076 num = 0;
3077 min = ~0;
3078 cur_prio = skb->priority;
3079 }
3080
3081 num++;
3082
3083 if (conn->sent < min) {
3084 min = conn->sent;
3085 chan = tmp;
3086 }
3087 }
3088
3089 if (hci_conn_num(hdev, type) == conn_num)
3090 break;
3091 }
3092
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003093 rcu_read_unlock();
3094
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003095 if (!chan)
3096 return NULL;
3097
3098 switch (chan->conn->type) {
3099 case ACL_LINK:
3100 cnt = hdev->acl_cnt;
3101 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003102 case AMP_LINK:
3103 cnt = hdev->block_cnt;
3104 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003105 case SCO_LINK:
3106 case ESCO_LINK:
3107 cnt = hdev->sco_cnt;
3108 break;
3109 case LE_LINK:
3110 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3111 break;
3112 default:
3113 cnt = 0;
3114 BT_ERR("Unknown link type");
3115 }
3116
3117 q = cnt / num;
3118 *quote = q ? q : 1;
3119 BT_DBG("chan %p quote %d", chan, *quote);
3120 return chan;
3121}
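
/* Unlike hci_low_sent(), this walks the channels of every matching
 * connection and serves only the highest skb->priority currently queued;
 * within that priority level the connection with the smallest sent count
 * wins, mirroring the fairness rule above.
 */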
3122
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003123static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3124{
3125 struct hci_conn_hash *h = &hdev->conn_hash;
3126 struct hci_conn *conn;
3127 int num = 0;
3128
3129 BT_DBG("%s", hdev->name);
3130
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003131 rcu_read_lock();
3132
3133 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003134 struct hci_chan *chan;
3135
3136 if (conn->type != type)
3137 continue;
3138
3139 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3140 continue;
3141
3142 num++;
3143
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003144 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003145 struct sk_buff *skb;
3146
3147 if (chan->sent) {
3148 chan->sent = 0;
3149 continue;
3150 }
3151
3152 if (skb_queue_empty(&chan->data_q))
3153 continue;
3154
3155 skb = skb_peek(&chan->data_q);
3156 if (skb->priority >= HCI_PRIO_MAX - 1)
3157 continue;
3158
3159 skb->priority = HCI_PRIO_MAX - 1;
3160
3161 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003162 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003163 }
3164
3165 if (hci_conn_num(hdev, type) == num)
3166 break;
3167 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003168
3169 rcu_read_unlock();
3170
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003171}
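
/* Anti-starvation pass: channels that transmitted in the last round get
 * chan->sent reset, while backlogged channels that got no airtime have
 * their head skb promoted to HCI_PRIO_MAX - 1 so lower-priority streams
 * cannot starve forever.
 */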
3172
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003173static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3174{
3175 /* Calculate count of blocks used by this packet */
3176 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3177}
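
/* Worked example: with hdev->block_len == 256, a 1000-byte ACL skb
 * (996 data bytes after the 4-byte ACL header) costs
 * DIV_ROUND_UP(996, 256) = 4 controller buffer blocks.
 */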
3178
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003179static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 if (!test_bit(HCI_RAW, &hdev->flags)) {
3182		/* ACL tx timeout must be longer than the maximum
3183 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003184 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003185 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003186 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003188}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003190static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003191{
3192 unsigned int cnt = hdev->acl_cnt;
3193 struct hci_chan *chan;
3194 struct sk_buff *skb;
3195 int quote;
3196
3197 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003198
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003199 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003200 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003201 u32 priority = (skb_peek(&chan->data_q))->priority;
3202 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003203 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003204 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003205
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003206 /* Stop if priority has changed */
3207 if (skb->priority < priority)
3208 break;
3209
3210 skb = skb_dequeue(&chan->data_q);
3211
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003212 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003213 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003214
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 hci_send_frame(skb);
3216 hdev->acl_last_tx = jiffies;
3217
3218 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003219 chan->sent++;
3220 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 }
3222 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003223
3224 if (cnt != hdev->acl_cnt)
3225 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226}
3227
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003228static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003229{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003230 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003231 struct hci_chan *chan;
3232 struct sk_buff *skb;
3233 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003234 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003235
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003236 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003237
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003238 BT_DBG("%s", hdev->name);
3239
3240 if (hdev->dev_type == HCI_AMP)
3241 type = AMP_LINK;
3242 else
3243 type = ACL_LINK;
3244
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003245 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003246 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003247 u32 priority = (skb_peek(&chan->data_q))->priority;
3248 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3249 int blocks;
3250
3251 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003252 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003253
3254 /* Stop if priority has changed */
3255 if (skb->priority < priority)
3256 break;
3257
3258 skb = skb_dequeue(&chan->data_q);
3259
3260 blocks = __get_blocks(hdev, skb);
3261 if (blocks > hdev->block_cnt)
3262 return;
3263
3264 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003265 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003266
3267 hci_send_frame(skb);
3268 hdev->acl_last_tx = jiffies;
3269
3270 hdev->block_cnt -= blocks;
3271 quote -= blocks;
3272
3273 chan->sent += blocks;
3274 chan->conn->sent += blocks;
3275 }
3276 }
3277
3278 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003279 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003280}
3281
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003282static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003283{
3284 BT_DBG("%s", hdev->name);
3285
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003286 /* No ACL link over BR/EDR controller */
3287 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3288 return;
3289
3290 /* No AMP link over AMP controller */
3291 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003292 return;
3293
3294 switch (hdev->flow_ctl_mode) {
3295 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3296 hci_sched_acl_pkt(hdev);
3297 break;
3298
3299 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3300 hci_sched_acl_blk(hdev);
3301 break;
3302 }
3303}
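
/* Packet-based flow control counts whole ACL packets against
 * hdev->acl_cnt, while block-based mode (used by AMP controllers)
 * accounts in fixed-size buffer blocks via hdev->block_cnt.
 */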
3304
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003306static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307{
3308 struct hci_conn *conn;
3309 struct sk_buff *skb;
3310 int quote;
3311
3312 BT_DBG("%s", hdev->name);
3313
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003314 if (!hci_conn_num(hdev, SCO_LINK))
3315 return;
3316
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3318 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3319 BT_DBG("skb %p len %d", skb, skb->len);
3320 hci_send_frame(skb);
3321
3322 conn->sent++;
3323 if (conn->sent == ~0)
3324 conn->sent = 0;
3325 }
3326 }
3327}
3328
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003329static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003330{
3331 struct hci_conn *conn;
3332 struct sk_buff *skb;
3333 int quote;
3334
3335 BT_DBG("%s", hdev->name);
3336
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003337 if (!hci_conn_num(hdev, ESCO_LINK))
3338 return;
3339
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003340 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3341 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003342 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3343 BT_DBG("skb %p len %d", skb, skb->len);
3344 hci_send_frame(skb);
3345
3346 conn->sent++;
3347 if (conn->sent == ~0)
3348 conn->sent = 0;
3349 }
3350 }
3351}
3352
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003353static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003354{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003355 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003356 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003357 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003358
3359 BT_DBG("%s", hdev->name);
3360
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003361 if (!hci_conn_num(hdev, LE_LINK))
3362 return;
3363
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003364 if (!test_bit(HCI_RAW, &hdev->flags)) {
3365		/* LE tx timeout must be longer than the maximum
3366 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003367 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003368 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003369 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003370 }
3371
3372 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003373 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003374 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003375 u32 priority = (skb_peek(&chan->data_q))->priority;
3376 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003377 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003378 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003379
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003380 /* Stop if priority has changed */
3381 if (skb->priority < priority)
3382 break;
3383
3384 skb = skb_dequeue(&chan->data_q);
3385
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003386 hci_send_frame(skb);
3387 hdev->le_last_tx = jiffies;
3388
3389 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003390 chan->sent++;
3391 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003392 }
3393 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003394
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003395 if (hdev->le_pkts)
3396 hdev->le_cnt = cnt;
3397 else
3398 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003399
3400 if (cnt != tmp)
3401 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003402}
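
/* A controller without dedicated LE buffers reports le_pkts == 0, in
 * which case LE traffic shares the ACL buffer pool; that is why the
 * count is written back to either le_cnt or acl_cnt above.
 */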
3403
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003404static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003406 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407 struct sk_buff *skb;
3408
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003409 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003410 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
Marcel Holtmann52de5992013-09-03 18:08:38 -07003412 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3413 /* Schedule queues and send stuff to HCI driver */
3414 hci_sched_acl(hdev);
3415 hci_sched_sco(hdev);
3416 hci_sched_esco(hdev);
3417 hci_sched_le(hdev);
3418 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003419
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 /* Send next queued raw (unknown type) packet */
3421 while ((skb = skb_dequeue(&hdev->raw_q)))
3422 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423}
3424
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003425/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426
3427/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003428static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429{
3430 struct hci_acl_hdr *hdr = (void *) skb->data;
3431 struct hci_conn *conn;
3432 __u16 handle, flags;
3433
3434 skb_pull(skb, HCI_ACL_HDR_SIZE);
3435
3436 handle = __le16_to_cpu(hdr->handle);
3437 flags = hci_flags(handle);
3438 handle = hci_handle(handle);
3439
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003440 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003441 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442
3443 hdev->stat.acl_rx++;
3444
3445 hci_dev_lock(hdev);
3446 conn = hci_conn_hash_lookup_handle(hdev, handle);
3447 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003448
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003450 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003451
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003453 l2cap_recv_acldata(conn, skb, flags);
3454 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003456 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003457 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 }
3459
3460 kfree_skb(skb);
3461}
3462
3463/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003464static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465{
3466 struct hci_sco_hdr *hdr = (void *) skb->data;
3467 struct hci_conn *conn;
3468 __u16 handle;
3469
3470 skb_pull(skb, HCI_SCO_HDR_SIZE);
3471
3472 handle = __le16_to_cpu(hdr->handle);
3473
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003474 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475
3476 hdev->stat.sco_rx++;
3477
3478 hci_dev_lock(hdev);
3479 conn = hci_conn_hash_lookup_handle(hdev, handle);
3480 hci_dev_unlock(hdev);
3481
3482 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003484 sco_recv_scodata(conn, skb);
3485 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003487 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003488 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489 }
3490
3491 kfree_skb(skb);
3492}
3493
Johan Hedberg9238f362013-03-05 20:37:48 +02003494static bool hci_req_is_complete(struct hci_dev *hdev)
3495{
3496 struct sk_buff *skb;
3497
3498 skb = skb_peek(&hdev->cmd_q);
3499 if (!skb)
3500 return true;
3501
3502 return bt_cb(skb)->req.start;
3503}
3504
Johan Hedberg42c6b122013-03-05 20:37:49 +02003505static void hci_resend_last(struct hci_dev *hdev)
3506{
3507 struct hci_command_hdr *sent;
3508 struct sk_buff *skb;
3509 u16 opcode;
3510
3511 if (!hdev->sent_cmd)
3512 return;
3513
3514 sent = (void *) hdev->sent_cmd->data;
3515 opcode = __le16_to_cpu(sent->opcode);
3516 if (opcode == HCI_OP_RESET)
3517 return;
3518
3519 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3520 if (!skb)
3521 return;
3522
3523 skb_queue_head(&hdev->cmd_q, skb);
3524 queue_work(hdev->workqueue, &hdev->cmd_work);
3525}
3526
Johan Hedberg9238f362013-03-05 20:37:48 +02003527void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3528{
3529 hci_req_complete_t req_complete = NULL;
3530 struct sk_buff *skb;
3531 unsigned long flags;
3532
3533 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3534
Johan Hedberg42c6b122013-03-05 20:37:49 +02003535 /* If the completed command doesn't match the last one that was
3536	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003537 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003538 if (!hci_sent_cmd_data(hdev, opcode)) {
3539 /* Some CSR based controllers generate a spontaneous
3540 * reset complete event during init and any pending
3541 * command will never be completed. In such a case we
3542 * need to resend whatever was the last sent
3543 * command.
3544 */
3545 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3546 hci_resend_last(hdev);
3547
Johan Hedberg9238f362013-03-05 20:37:48 +02003548 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003549 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003550
3551 /* If the command succeeded and there's still more commands in
3552 * this request the request is not yet complete.
3553 */
3554 if (!status && !hci_req_is_complete(hdev))
3555 return;
3556
3557	/* If this was the last command in a request, the complete
3558 * callback would be found in hdev->sent_cmd instead of the
3559 * command queue (hdev->cmd_q).
3560 */
3561 if (hdev->sent_cmd) {
3562 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003563
3564 if (req_complete) {
3565 /* We must set the complete callback to NULL to
3566 * avoid calling the callback more than once if
3567 * this function gets called again.
3568 */
3569 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3570
Johan Hedberg9238f362013-03-05 20:37:48 +02003571 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003572 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003573 }
3574
3575 /* Remove all pending commands belonging to this request */
3576 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3577 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3578 if (bt_cb(skb)->req.start) {
3579 __skb_queue_head(&hdev->cmd_q, skb);
3580 break;
3581 }
3582
3583 req_complete = bt_cb(skb)->req.complete;
3584 kfree_skb(skb);
3585 }
3586 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3587
3588call_complete:
3589 if (req_complete)
3590 req_complete(hdev, status);
3591}
3592
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003593static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003595 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 struct sk_buff *skb;
3597
3598 BT_DBG("%s", hdev->name);
3599
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003601 /* Send copy to monitor */
3602 hci_send_to_monitor(hdev, skb);
3603
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 if (atomic_read(&hdev->promisc)) {
3605 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003606 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 }
3608
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003609 if (test_bit(HCI_RAW, &hdev->flags) ||
3610 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 kfree_skb(skb);
3612 continue;
3613 }
3614
3615 if (test_bit(HCI_INIT, &hdev->flags)) {
3616			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003617 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 case HCI_ACLDATA_PKT:
3619 case HCI_SCODATA_PKT:
3620 kfree_skb(skb);
3621 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003622 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 }
3624
3625 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003626 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003628 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629 hci_event_packet(hdev, skb);
3630 break;
3631
3632 case HCI_ACLDATA_PKT:
3633 BT_DBG("%s ACL data packet", hdev->name);
3634 hci_acldata_packet(hdev, skb);
3635 break;
3636
3637 case HCI_SCODATA_PKT:
3638 BT_DBG("%s SCO data packet", hdev->name);
3639 hci_scodata_packet(hdev, skb);
3640 break;
3641
3642 default:
3643 kfree_skb(skb);
3644 break;
3645 }
3646 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647}
3648
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003649static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003651 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652 struct sk_buff *skb;
3653
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003654 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3655 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003658 if (atomic_read(&hdev->cmd_cnt)) {
3659 skb = skb_dequeue(&hdev->cmd_q);
3660 if (!skb)
3661 return;
3662
Wei Yongjun7585b972009-02-25 18:29:52 +08003663 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003665 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003666 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667 atomic_dec(&hdev->cmd_cnt);
3668 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003669 if (test_bit(HCI_RESET, &hdev->flags))
3670 del_timer(&hdev->cmd_timer);
3671 else
3672 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003673 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 } else {
3675 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003676 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 }
3678 }
3679}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003680
Andre Guedes31f79562012-04-24 21:02:53 -03003681u8 bdaddr_to_le(u8 bdaddr_type)
3682{
3683 switch (bdaddr_type) {
3684 case BDADDR_LE_PUBLIC:
3685 return ADDR_LE_DEV_PUBLIC;
3686
3687 default:
3688 /* Fallback to LE Random address type */
3689 return ADDR_LE_DEV_RANDOM;
3690 }
3691}
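
/* Illustrative usage: management code maps the address types exposed
 * over the mgmt interface to the internal constants, e.g.
 * bdaddr_to_le(BDADDR_LE_RANDOM) == ADDR_LE_DEV_RANDOM.
 */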