blob: 7cbdd33d9b389f4ee8c0ca5105711287ee451f3d [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device lifecycle event (register/unregister/up/down) for
 * @hdev to the HCI socket layer, currently the only notification sink.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg42c6b122013-03-05 20:37:49 +020060static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Johan Hedberg42c6b122013-03-05 20:37:49 +020062 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
/* Take ownership of the last received HCI event (hdev->recv_evt) and
 * validate it against the synchronous command that was just run.
 *
 * If @event is non-zero, the caller is waiting for that specific event
 * type; otherwise a Command Complete event matching @opcode is
 * expected. On success the skb (with headers pulled) is returned to
 * the caller, who owns it. On any mismatch or short packet the skb is
 * freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the device lock so the RX path cannot
	 * race with us over ownership of the skb.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event type: accept only that. */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command and sleep (interruptibly) until it
 * completes, is canceled, or @timeout jiffies elapse.
 *
 * @event selects which event terminates the command (0 means the
 * normal Command Complete). Returns the resulting event skb (caller
 * owns it) or an ERR_PTR: -EINTR on signal, -ETIMEDOUT on timeout, or
 * a negated HCI/errno status on failure.
 *
 * Caller must hold hci_req_lock (this relies on the per-device
 * req_status/req_result fields being exclusively ours).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Queue ourselves before sleeping so a completion that fires
	 * between hci_req_run() and schedule_timeout() still wakes us.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case
 * where the command is terminated by its Command Complete event
 * (event == 0). Same locking and return-value contract.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more HCI commands) and is
 * run with @opt as its argument. The calling thread then sleeps
 * interruptibly until hci_req_sync_complete() fires or @timeout
 * jiffies pass.
 *
 * Returns 0 on success, -EINTR if interrupted by a signal, -ETIMEDOUT
 * on timeout, or a negative errno derived from the HCI status.
 * Caller must hold hci_req_lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Register on the wait queue before sleeping so an early
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Status still HCI_REQ_PEND: nothing woke us in time. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
Johan Hedberg01178cd2013-03-05 20:37:41 +0200262static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200263 void (*req)(struct hci_request *req,
264 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200265 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 int ret;
268
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 /* Serialize all requests */
273 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200274 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 hci_req_unlock(hdev);
276
277 return ret;
278}
279
Johan Hedberg42c6b122013-03-05 20:37:49 +0200280static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200282 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283
284 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
Johan Hedberg42c6b122013-03-05 20:37:49 +0200289static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200292
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200296 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200298
299 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301}
302
Johan Hedberg42c6b122013-03-05 20:37:49 +0200303static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200304{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200306
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200307 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300309
310 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300312
313 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200315}
316
Johan Hedberg42c6b122013-03-05 20:37:49 +0200317static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200318{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200319 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200320
321 BT_DBG("%s %ld", hdev->name, opt);
322
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200325 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300326
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200327 switch (hdev->dev_type) {
328 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200329 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200330 break;
331
332 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200333 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200334 break;
335
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
339 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200340}
341
/* Stage-2 BR/EDR setup: queue the baseline configuration reads/writes
 * every BR/EDR-capable controller gets. Commands are queued in order;
 * the request machinery sends them sequentially.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots of 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters; these commands only exist from
	 * Bluetooth 1.2 onwards.
	 */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
373
/* Stage-2 LE setup: queue the standard LE capability reads, and mark
 * LE as enabled on controllers that have no BR/EDR side (single-mode
 * LE controllers cannot have it switched off).
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
Johan Hedberg42c6b122013-03-05 20:37:49 +0200426static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200427{
428 u8 mode;
429
Johan Hedberg42c6b122013-03-05 20:37:49 +0200430 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200431
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433}
434
/* Build and queue the HCI event mask(s) for the controller: start from
 * a capability-dependent default, enable the extra events each
 * supported feature needs, then queue Set Event Mask (and the LE event
 * mask for LE-capable controllers).
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE-capable controllers also get an LE event mask; the events
	 * array is reused for its payload.
	 */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
515
/* Second init stage: per-transport setup (BR/EDR, LE), event mask
 * configuration, and feature-conditional configuration commands.
 * Queue order matters: transport setup runs before the event mask and
 * the remaining feature probes.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			/* Enable Simple Pairing mode on the controller. */
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled by the host: clear any stale EIR
			 * data both locally and on the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		/* Fetch extended features page 1. */
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		/* Host requested link-level security: turn on
		 * authentication enable.
		 */
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
569
Johan Hedberg42c6b122013-03-05 20:37:49 +0200570static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200571{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200572 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200573 struct hci_cp_write_def_link_policy cp;
574 u16 link_policy = 0;
575
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
584
585 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200587}
588
Johan Hedberg42c6b122013-03-05 20:37:49 +0200589static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200590{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200591 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200592 struct hci_cp_write_le_host_supported cp;
593
Johan Hedbergc73eee92013-04-19 18:35:21 +0300594 /* LE-only devices do not support explicit enablement */
595 if (!lmp_bredr_capable(hdev))
596 return;
597
Johan Hedberg2177bab2013-03-05 20:37:43 +0200598 memset(&cp, 0, sizeof(cp));
599
600 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
601 cp.le = 0x01;
602 cp.simul = lmp_le_br_capable(hdev);
603 }
604
605 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
607 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200608}
609
/* Build and queue event mask page 2, enabling the Connectionless
 * Slave Broadcast events for whichever CSB roles (master and/or
 * slave) the controller's feature page 2 advertises.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
637
/* Third init stage: commands that depend on the capability data read
 * in earlier stages (supported-commands bitmap, LMP features,
 * max_page).
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy only if the command is supported. */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
678
/* Fourth init stage: optional features gated on the capability data
 * collected by the earlier stages.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
691
Johan Hedberg2177bab2013-03-05 20:37:43 +0200692static int __hci_init(struct hci_dev *hdev)
693{
694 int err;
695
696 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
697 if (err < 0)
698 return err;
699
700 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
701 * BR/EDR/LE type controllers. AMP controllers only need the
702 * first stage init.
703 */
704 if (hdev->dev_type != HCI_BREDR)
705 return 0;
706
707 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
708 if (err < 0)
709 return err;
710
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300711 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
712 if (err < 0)
713 return err;
714
715 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200716}
717
Johan Hedberg42c6b122013-03-05 20:37:49 +0200718static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719{
720 __u8 scan = opt;
721
Johan Hedberg42c6b122013-03-05 20:37:49 +0200722 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723
724 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200725 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726}
727
Johan Hedberg42c6b122013-03-05 20:37:49 +0200728static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729{
730 __u8 auth = opt;
731
Johan Hedberg42c6b122013-03-05 20:37:49 +0200732 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733
734 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200735 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736}
737
Johan Hedberg42c6b122013-03-05 20:37:49 +0200738static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739{
740 __u8 encrypt = opt;
741
Johan Hedberg42c6b122013-03-05 20:37:49 +0200742 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200744 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200745 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746}
747
Johan Hedberg42c6b122013-03-05 20:37:49 +0200748static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200749{
750 __le16 policy = cpu_to_le16(opt);
751
Johan Hedberg42c6b122013-03-05 20:37:49 +0200752 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200753
754 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200755 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200756}
757
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900758/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 * Device is held on return. */
760struct hci_dev *hci_dev_get(int index)
761{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200762 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763
764 BT_DBG("%d", index);
765
766 if (index < 0)
767 return NULL;
768
769 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200770 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 if (d->id == index) {
772 hdev = hci_dev_hold(d);
773 break;
774 }
775 }
776 read_unlock(&hci_dev_list_lock);
777 return hdev;
778}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779
780/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200781
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200782bool hci_discovery_active(struct hci_dev *hdev)
783{
784 struct discovery_state *discov = &hdev->discovery;
785
Andre Guedes6fbe1952012-02-03 17:47:58 -0300786 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300787 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300788 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200789 return true;
790
Andre Guedes6fbe1952012-02-03 17:47:58 -0300791 default:
792 return false;
793 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200794}
795
Johan Hedbergff9ef572012-01-04 14:23:45 +0200796void hci_discovery_set_state(struct hci_dev *hdev, int state)
797{
798 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
799
800 if (hdev->discovery.state == state)
801 return;
802
803 switch (state) {
804 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300805 if (hdev->discovery.state != DISCOVERY_STARTING)
806 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200807 break;
808 case DISCOVERY_STARTING:
809 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300810 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200811 mgmt_discovering(hdev, 1);
812 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200813 case DISCOVERY_RESOLVING:
814 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200815 case DISCOVERY_STOPPING:
816 break;
817 }
818
819 hdev->discovery.state = state;
820}
821
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300822void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700823{
Johan Hedberg30883512012-01-04 14:16:21 +0200824 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200825 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826
Johan Hedberg561aafb2012-01-04 13:31:59 +0200827 list_for_each_entry_safe(p, n, &cache->all, all) {
828 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200829 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200831
832 INIT_LIST_HEAD(&cache->unknown);
833 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834}
835
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300836struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
837 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838{
Johan Hedberg30883512012-01-04 14:16:21 +0200839 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840 struct inquiry_entry *e;
841
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300842 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843
Johan Hedberg561aafb2012-01-04 13:31:59 +0200844 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200846 return e;
847 }
848
849 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850}
851
Johan Hedberg561aafb2012-01-04 13:31:59 +0200852struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300853 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200854{
Johan Hedberg30883512012-01-04 14:16:21 +0200855 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200856 struct inquiry_entry *e;
857
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300858 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200859
860 list_for_each_entry(e, &cache->unknown, list) {
861 if (!bacmp(&e->data.bdaddr, bdaddr))
862 return e;
863 }
864
865 return NULL;
866}
867
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200868struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300869 bdaddr_t *bdaddr,
870 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200871{
872 struct discovery_state *cache = &hdev->discovery;
873 struct inquiry_entry *e;
874
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300875 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200876
877 list_for_each_entry(e, &cache->resolve, list) {
878 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
879 return e;
880 if (!bacmp(&e->data.bdaddr, bdaddr))
881 return e;
882 }
883
884 return NULL;
885}
886
Johan Hedberga3d4e202012-01-09 00:53:02 +0200887void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300888 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200889{
890 struct discovery_state *cache = &hdev->discovery;
891 struct list_head *pos = &cache->resolve;
892 struct inquiry_entry *p;
893
894 list_del(&ie->list);
895
896 list_for_each_entry(p, &cache->resolve, list) {
897 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300898 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200899 break;
900 pos = &p->list;
901 }
902
903 list_add(&ie->list, pos);
904}
905
Johan Hedberg31754052012-01-04 13:39:52 +0200906bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300907 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908{
Johan Hedberg30883512012-01-04 14:16:21 +0200909 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200910 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300912 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913
Szymon Janc2b2fec42012-11-20 11:38:54 +0100914 hci_remove_remote_oob_data(hdev, &data->bdaddr);
915
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200916 if (ssp)
917 *ssp = data->ssp_mode;
918
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200919 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200920 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200921 if (ie->data.ssp_mode && ssp)
922 *ssp = true;
923
Johan Hedberga3d4e202012-01-09 00:53:02 +0200924 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300925 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200926 ie->data.rssi = data->rssi;
927 hci_inquiry_cache_update_resolve(hdev, ie);
928 }
929
Johan Hedberg561aafb2012-01-04 13:31:59 +0200930 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200931 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200932
Johan Hedberg561aafb2012-01-04 13:31:59 +0200933 /* Entry not in the cache. Add new one. */
934 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
935 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200936 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200937
938 list_add(&ie->all, &cache->all);
939
940 if (name_known) {
941 ie->name_state = NAME_KNOWN;
942 } else {
943 ie->name_state = NAME_NOT_KNOWN;
944 list_add(&ie->list, &cache->unknown);
945 }
946
947update:
948 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300949 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200950 ie->name_state = NAME_KNOWN;
951 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 }
953
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200954 memcpy(&ie->data, data, sizeof(*data));
955 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200957
958 if (ie->name_state == NAME_NOT_KNOWN)
959 return false;
960
961 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962}
963
964static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
965{
Johan Hedberg30883512012-01-04 14:16:21 +0200966 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 struct inquiry_info *info = (struct inquiry_info *) buf;
968 struct inquiry_entry *e;
969 int copied = 0;
970
Johan Hedberg561aafb2012-01-04 13:31:59 +0200971 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200973
974 if (copied >= num)
975 break;
976
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 bacpy(&info->bdaddr, &data->bdaddr);
978 info->pscan_rep_mode = data->pscan_rep_mode;
979 info->pscan_period_mode = data->pscan_period_mode;
980 info->pscan_mode = data->pscan_mode;
981 memcpy(info->dev_class, data->dev_class, 3);
982 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200985 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 }
987
988 BT_DBG("cache %p, copied %d", cache, copied);
989 return copied;
990}
991
Johan Hedberg42c6b122013-03-05 20:37:49 +0200992static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993{
994 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200995 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 struct hci_cp_inquiry cp;
997
998 BT_DBG("%s", hdev->name);
999
1000 if (test_bit(HCI_INQUIRY, &hdev->flags))
1001 return;
1002
1003 /* Start Inquiry */
1004 memcpy(&cp.lap, &ir->lap, 3);
1005 cp.length = ir->length;
1006 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001007 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008}
1009
Andre Guedes3e13fa12013-03-27 20:04:56 -03001010static int wait_inquiry(void *word)
1011{
1012 schedule();
1013 return signal_pending(current);
1014}
1015
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016int hci_inquiry(void __user *arg)
1017{
1018 __u8 __user *ptr = arg;
1019 struct hci_inquiry_req ir;
1020 struct hci_dev *hdev;
1021 int err = 0, do_inquiry = 0, max_rsp;
1022 long timeo;
1023 __u8 *buf;
1024
1025 if (copy_from_user(&ir, ptr, sizeof(ir)))
1026 return -EFAULT;
1027
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001028 hdev = hci_dev_get(ir.dev_id);
1029 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030 return -ENODEV;
1031
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001032 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1033 err = -EBUSY;
1034 goto done;
1035 }
1036
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001037 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001038 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001039 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001040 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 do_inquiry = 1;
1042 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001043 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044
Marcel Holtmann04837f62006-07-03 10:02:33 +02001045 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001046
1047 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001048 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1049 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001050 if (err < 0)
1051 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001052
1053 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1054 * cleared). If it is interrupted by a signal, return -EINTR.
1055 */
1056 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1057 TASK_INTERRUPTIBLE))
1058 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001059 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001061 /* for unlimited number of responses we will use buffer with
1062 * 255 entries
1063 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1065
1066 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1067 * copy it to the user space.
1068 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001069 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001070 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 err = -ENOMEM;
1072 goto done;
1073 }
1074
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001075 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001077 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078
1079 BT_DBG("num_rsp %d", ir.num_rsp);
1080
1081 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1082 ptr += sizeof(ir);
1083 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001084 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001086 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 err = -EFAULT;
1088
1089 kfree(buf);
1090
1091done:
1092 hci_dev_put(hdev);
1093 return err;
1094}
1095
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001096static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1097{
1098 u8 ad_len = 0, flags = 0;
1099 size_t name_len;
1100
1101 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1102 flags |= LE_AD_GENERAL;
1103
1104 if (!lmp_bredr_capable(hdev))
1105 flags |= LE_AD_NO_BREDR;
1106
1107 if (lmp_le_br_capable(hdev))
1108 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1109
1110 if (lmp_host_le_br_capable(hdev))
1111 flags |= LE_AD_SIM_LE_BREDR_HOST;
1112
1113 if (flags) {
1114 BT_DBG("adv flags 0x%02x", flags);
1115
1116 ptr[0] = 2;
1117 ptr[1] = EIR_FLAGS;
1118 ptr[2] = flags;
1119
1120 ad_len += 3;
1121 ptr += 3;
1122 }
1123
1124 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1125 ptr[0] = 2;
1126 ptr[1] = EIR_TX_POWER;
1127 ptr[2] = (u8) hdev->adv_tx_power;
1128
1129 ad_len += 3;
1130 ptr += 3;
1131 }
1132
1133 name_len = strlen(hdev->dev_name);
1134 if (name_len > 0) {
1135 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1136
1137 if (name_len > max_len) {
1138 name_len = max_len;
1139 ptr[1] = EIR_NAME_SHORT;
1140 } else
1141 ptr[1] = EIR_NAME_COMPLETE;
1142
1143 ptr[0] = name_len + 1;
1144
1145 memcpy(ptr + 2, hdev->dev_name, name_len);
1146
1147 ad_len += (name_len + 2);
1148 ptr += (name_len + 2);
1149 }
1150
1151 return ad_len;
1152}
1153
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001154void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001155{
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001156 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001157 struct hci_cp_le_set_adv_data cp;
1158 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001159
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001160 if (!lmp_le_capable(hdev))
1161 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001162
1163 memset(&cp, 0, sizeof(cp));
1164
1165 len = create_ad(hdev, cp.data);
1166
1167 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001168 memcmp(cp.data, hdev->adv_data, len) == 0)
1169 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001170
1171 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1172 hdev->adv_data_len = len;
1173
1174 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001175
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001176 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001177}
1178
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001179static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 int ret = 0;
1182
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 BT_DBG("%s %p", hdev->name, hdev);
1184
1185 hci_req_lock(hdev);
1186
Johan Hovold94324962012-03-15 14:48:41 +01001187 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1188 ret = -ENODEV;
1189 goto done;
1190 }
1191
Johan Hedbergbf543032013-09-13 08:58:18 +03001192 /* Check for rfkill but allow the HCI setup stage to proceed
1193 * (which in itself doesn't cause any RF activity).
1194 */
1195 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
1196 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001197 ret = -ERFKILL;
1198 goto done;
1199 }
1200
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 if (test_bit(HCI_UP, &hdev->flags)) {
1202 ret = -EALREADY;
1203 goto done;
1204 }
1205
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 if (hdev->open(hdev)) {
1207 ret = -EIO;
1208 goto done;
1209 }
1210
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001211 atomic_set(&hdev->cmd_cnt, 1);
1212 set_bit(HCI_INIT, &hdev->flags);
1213
1214 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1215 ret = hdev->setup(hdev);
1216
1217 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001218 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1219 set_bit(HCI_RAW, &hdev->flags);
1220
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001221 if (!test_bit(HCI_RAW, &hdev->flags) &&
1222 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001223 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 }
1225
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001226 clear_bit(HCI_INIT, &hdev->flags);
1227
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 if (!ret) {
1229 hci_dev_hold(hdev);
1230 set_bit(HCI_UP, &hdev->flags);
1231 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001232 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001233 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001234 mgmt_valid_hdev(hdev)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001235 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001236 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001237 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001238 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001239 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001241 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001242 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001243 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
1245 skb_queue_purge(&hdev->cmd_q);
1246 skb_queue_purge(&hdev->rx_q);
1247
1248 if (hdev->flush)
1249 hdev->flush(hdev);
1250
1251 if (hdev->sent_cmd) {
1252 kfree_skb(hdev->sent_cmd);
1253 hdev->sent_cmd = NULL;
1254 }
1255
1256 hdev->close(hdev);
1257 hdev->flags = 0;
1258 }
1259
1260done:
1261 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 return ret;
1263}
1264
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001265/* ---- HCI ioctl helpers ---- */
1266
1267int hci_dev_open(__u16 dev)
1268{
1269 struct hci_dev *hdev;
1270 int err;
1271
1272 hdev = hci_dev_get(dev);
1273 if (!hdev)
1274 return -ENODEV;
1275
Johan Hedberge1d08f42013-10-01 22:44:50 +03001276 /* We need to ensure that no other power on/off work is pending
1277 * before proceeding to call hci_dev_do_open. This is
1278 * particularly important if the setup procedure has not yet
1279 * completed.
1280 */
1281 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1282 cancel_delayed_work(&hdev->power_off);
1283
1284 flush_workqueue(hdev->req_workqueue);
1285
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001286 err = hci_dev_do_open(hdev);
1287
1288 hci_dev_put(hdev);
1289
1290 return err;
1291}
1292
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293static int hci_dev_do_close(struct hci_dev *hdev)
1294{
1295 BT_DBG("%s %p", hdev->name, hdev);
1296
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001297 cancel_delayed_work(&hdev->power_off);
1298
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 hci_req_cancel(hdev, ENODEV);
1300 hci_req_lock(hdev);
1301
1302 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001303 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 hci_req_unlock(hdev);
1305 return 0;
1306 }
1307
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001308 /* Flush RX and TX works */
1309 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001310 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001312 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001313 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001314 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001315 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001316 }
1317
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001318 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001319 cancel_delayed_work(&hdev->service_cache);
1320
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001321 cancel_delayed_work_sync(&hdev->le_scan_disable);
1322
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001323 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001324 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001326 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
1328 hci_notify(hdev, HCI_DEV_DOWN);
1329
1330 if (hdev->flush)
1331 hdev->flush(hdev);
1332
1333 /* Reset device */
1334 skb_queue_purge(&hdev->cmd_q);
1335 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001336 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001337 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001339 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 clear_bit(HCI_INIT, &hdev->flags);
1341 }
1342
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001343 /* flush cmd work */
1344 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345
1346 /* Drop queues */
1347 skb_queue_purge(&hdev->rx_q);
1348 skb_queue_purge(&hdev->cmd_q);
1349 skb_queue_purge(&hdev->raw_q);
1350
1351 /* Drop last sent command */
1352 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001353 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 kfree_skb(hdev->sent_cmd);
1355 hdev->sent_cmd = NULL;
1356 }
1357
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001358 kfree_skb(hdev->recv_evt);
1359 hdev->recv_evt = NULL;
1360
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 /* After this point our queues are empty
1362 * and no tasks are scheduled. */
1363 hdev->close(hdev);
1364
Johan Hedberg35b973c2013-03-15 17:06:59 -05001365 /* Clear flags */
1366 hdev->flags = 0;
1367 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1368
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001369 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1370 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001371 hci_dev_lock(hdev);
1372 mgmt_powered(hdev, 0);
1373 hci_dev_unlock(hdev);
1374 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001375
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001376 /* Controller radio is available but is currently powered down */
1377 hdev->amp_status = 0;
1378
Johan Hedberge59fda82012-02-22 18:11:53 +02001379 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001380 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001381
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 hci_req_unlock(hdev);
1383
1384 hci_dev_put(hdev);
1385 return 0;
1386}
1387
/* HCIDEVDOWN ioctl helper: look up the device by index and power it
 * down.  Devices claimed by a user channel socket cannot be closed
 * through this interface (-EBUSY).  Returns 0 on success or a
 * negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* An explicit close supersedes the delayed auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1411
/* HCIDEVRESET ioctl helper: flush queues, drop cached state and
 * connections, and issue an HCI Reset to a device that is up.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -ENETDOWN when
 * the device is not up, -EBUSY for user-channel devices, or an error
 * from the reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Drop cached inquiry results and all connections */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command credit and per-link-type packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1456
1457int hci_dev_reset_stat(__u16 dev)
1458{
1459 struct hci_dev *hdev;
1460 int ret = 0;
1461
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001462 hdev = hci_dev_get(dev);
1463 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 return -ENODEV;
1465
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001466 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1467 ret = -EBUSY;
1468 goto done;
1469 }
1470
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1472
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001473done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 return ret;
1476}
1477
/* Dispatcher for the simple HCI device ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...).  Copies the hci_dev_req from user space, resolves
 * the device, and either issues the matching synchronous HCI request
 * or updates the in-kernel setting directly.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -EBUSY,
 * -EOPNOTSUPP, -EINVAL, or an error from the request).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* User-channel devices are controlled from user space only */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs pkts in the low 16 bits, mtu in the high */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1558
/* Handle the HCIGETDEVLIST ioctl: copy a snapshot of the registered
 * controllers (dev_id + flags pairs) into the userspace buffer at @arg.
 *
 * Returns 0 on success, -EFAULT on user copy failure, -EINVAL for a
 * zero or oversized requested count and -ENOMEM if the temporary
 * kernel buffer cannot be allocated.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	/* The first __u16 of the request is the number of entry slots
	 * that userspace provided. */
	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel-side buffer stays bounded
	 * (at most two pages worth of entries). */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as userspace activity, so
		 * cancel any pending auto-power-off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects devices to be
		 * pairable by default. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries that were actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1605
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * device id named in the userspace buffer at @arg.
 *
 * Returns 0 on success, -EFAULT on user copy failure and -ENODEV when
 * the id does not name a registered controller.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as userspace activity, so cancel
	 * any pending auto-power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble encodes the transport bus, next two bits the
	 * device type. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields and have no SCO transport at all. */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1654
1655/* ---- Interface to HCI drivers ---- */
1656
/* rfkill "set block" callback.  A soft-block powers the controller
 * down (unless setup is still running, in which case the setup path
 * handles it); a soft-unblock only clears the flag -- powering back up
 * is left to userspace/mgmt.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	/* While a user channel owns the device the kernel must not
	 * change its power state behind its back. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
1676
/* rfkill callbacks for the controller's soft-kill switch. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1680
/* Deferred power-on handler (hdev->power_on work).  Brings the device
 * up; on failure the error is reported to mgmt.  If the open succeeded
 * only because of the auto-power-on logic, a delayed power-off is
 * scheduled so an unclaimed device does not stay powered.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* An rfkill block may have arrived while the open was in
	 * flight; honour it by closing the device again. */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back down unless userspace claims the device
		 * before the timeout fires. */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1705
/* Deferred power-off handler (hdev->power_off delayed work). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1715
/* Deferred discoverable-timeout handler (hdev->discov_off delayed
 * work): turn inquiry scan back off, leaving only page scan enabled,
 * and reset the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1733
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001734int hci_uuids_clear(struct hci_dev *hdev)
1735{
Johan Hedberg48210022013-01-27 00:31:28 +02001736 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001737
Johan Hedberg48210022013-01-27 00:31:28 +02001738 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1739 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001740 kfree(uuid);
1741 }
1742
1743 return 0;
1744}
1745
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001746int hci_link_keys_clear(struct hci_dev *hdev)
1747{
1748 struct list_head *p, *n;
1749
1750 list_for_each_safe(p, n, &hdev->link_keys) {
1751 struct link_key *key;
1752
1753 key = list_entry(p, struct link_key, list);
1754
1755 list_del(p);
1756 kfree(key);
1757 }
1758
1759 return 0;
1760}
1761
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001762int hci_smp_ltks_clear(struct hci_dev *hdev)
1763{
1764 struct smp_ltk *k, *tmp;
1765
1766 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1767 list_del(&k->list);
1768 kfree(k);
1769 }
1770
1771 return 0;
1772}
1773
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001774struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1775{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001776 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001777
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001778 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001779 if (bacmp(bdaddr, &k->bdaddr) == 0)
1780 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001781
1782 return NULL;
1783}
1784
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently (i.e. survive the connection) based on the key type and
 * the authentication requirements of both sides.  Returns true when
 * the key may be kept, false when it must be treated as transient.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1820
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001821struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001822{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001823 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001824
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001825 list_for_each_entry(k, &hdev->long_term_keys, list) {
1826 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001827 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001828 continue;
1829
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001830 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001831 }
1832
1833 return NULL;
1834}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001835
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001836struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001837 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001838{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001839 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001840
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001841 list_for_each_entry(k, &hdev->long_term_keys, list)
1842 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001843 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001844 return k;
1845
1846 return NULL;
1847}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001848
/* Store (or update) the BR/EDR link key for @bdaddr.  @new_key is
 * non-zero when the key was freshly created by pairing, in which case
 * mgmt is notified and the key's persistence is evaluated.  Returns 0
 * on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known". */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type;
	 * everything else records the new type as-is. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops. */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1901
/* Store (or update) an SMP key for @bdaddr/@addr_type.  Only STK and
 * LTK types are accepted; other types are silently ignored.  When
 * @new_key is non-zero and the key is an LTK, mgmt is notified.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not short term keys) are reported to
	 * userspace for persistent storage. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1938
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001939int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1940{
1941 struct link_key *key;
1942
1943 key = hci_find_link_key(hdev, bdaddr);
1944 if (!key)
1945 return -ENOENT;
1946
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001947 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001948
1949 list_del(&key->list);
1950 kfree(key);
1951
1952 return 0;
1953}
1954
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001955int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1956{
1957 struct smp_ltk *k, *tmp;
1958
1959 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1960 if (bacmp(bdaddr, &k->bdaddr))
1961 continue;
1962
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001963 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001964
1965 list_del(&k->list);
1966 kfree(k);
1967 }
1968
1969 return 0;
1970}
1971
/* HCI command timer function: fires when the controller has not
 * answered the most recently sent command in time.  Logs the stuck
 * opcode (if the command is still buffered) and resets the command
 * credit so the queue can make progress again.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Grant one command credit and kick the TX work. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1989
Szymon Janc2763eda2011-03-22 13:12:22 +01001990struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001991 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001992{
1993 struct oob_data *data;
1994
1995 list_for_each_entry(data, &hdev->remote_oob_data, list)
1996 if (bacmp(bdaddr, &data->bdaddr) == 0)
1997 return data;
1998
1999 return NULL;
2000}
2001
2002int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2003{
2004 struct oob_data *data;
2005
2006 data = hci_find_remote_oob_data(hdev, bdaddr);
2007 if (!data)
2008 return -ENOENT;
2009
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002010 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002011
2012 list_del(&data->list);
2013 kfree(data);
2014
2015 return 0;
2016}
2017
2018int hci_remote_oob_data_clear(struct hci_dev *hdev)
2019{
2020 struct oob_data *data, *n;
2021
2022 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2023 list_del(&data->list);
2024 kfree(data);
2025 }
2026
2027 return 0;
2028}
2029
/* Store (or refresh) the remote out-of-band pairing data (hash and
 * randomizer) for @bdaddr.  Returns 0 on success or -ENOMEM if a new
 * entry cannot be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	/* No entry for this address yet -- allocate one. */
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* Always overwrite with the most recent values. */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2053
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002054struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002055{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002056 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002057
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002058 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002059 if (bacmp(bdaddr, &b->bdaddr) == 0)
2060 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002061
2062 return NULL;
2063}
2064
2065int hci_blacklist_clear(struct hci_dev *hdev)
2066{
2067 struct list_head *p, *n;
2068
2069 list_for_each_safe(p, n, &hdev->blacklist) {
2070 struct bdaddr_list *b;
2071
2072 b = list_entry(p, struct bdaddr_list, list);
2073
2074 list_del(p);
2075 kfree(b);
2076 }
2077
2078 return 0;
2079}
2080
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002081int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002082{
2083 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002084
2085 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2086 return -EBADF;
2087
Antti Julku5e762442011-08-25 16:48:02 +03002088 if (hci_blacklist_lookup(hdev, bdaddr))
2089 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002090
2091 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002092 if (!entry)
2093 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002094
2095 bacpy(&entry->bdaddr, bdaddr);
2096
2097 list_add(&entry->list, &hdev->blacklist);
2098
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002099 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002100}
2101
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002102int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002103{
2104 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002105
Szymon Janc1ec918c2011-11-16 09:32:21 +01002106 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002107 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002108
2109 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002110 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002111 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002112
2113 list_del(&entry->list);
2114 kfree(entry);
2115
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002116 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002117}
2118
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002119static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002120{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002121 if (status) {
2122 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002123
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002124 hci_dev_lock(hdev);
2125 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2126 hci_dev_unlock(hdev);
2127 return;
2128 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002129}
2130
/* Completion handler for the LE-scan-disable request.  For a pure LE
 * discovery the session is finished; for interleaved discovery the
 * BR/EDR inquiry phase is started next.
 *
 * NOTE(review): the switch has no default case, so other discovery
 * types fall through silently -- presumably they never reach this
 * path; confirm against the discovery state machine.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: scanning is off, we are done. */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Interleaved discovery: follow up with a BR/EDR
		 * inquiry now that the LE phase has ended. */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2173
/* Deferred work (hdev->le_scan_disable) that stops an ongoing LE scan:
 * submits an HCI request turning scanning off; the follow-up handling
 * happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2194
/* Alloc HCI device: allocate and initialize a struct hci_dev with
 * default parameters, lists, work items and queues.  The caller still
 * has to set the transport callbacks and call hci_register_dev().
 * Returns the new device or NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative protocol defaults; drivers/init may widen them. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff interval bounds in baseband slots (0.625 ms units). */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Per-command watchdog; armed when a command is sent. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2248
/* Free HCI device: drop the device reference; the actual memory is
 * released by the device core's release callback once the last
 * reference is gone. */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2256
/* Register HCI device: assign an index, create the workqueues, sysfs
 * entries and rfkill switch, add the device to the global list and
 * schedule the initial power-on.  Returns the new device id (>= 0) or
 * a negative errno on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered, reclaim-safe workqueue for TX/RX/command work. */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate queue for synchronous request work (power on/off). */
	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failure is non-fatal. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Record an already-active block before the first power-on. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);

	/* AMP controllers are powered explicitly, never automatically. */
	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2344
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device so concurrent paths can bail out early */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index now; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt the index is gone, unless the device never finished
	 * setup and mgmt never announced it.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush per-device persistent state */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken by hci_register_dev() */
	hci_dev_put(hdev);

	/* Release the index only after the device is fully torn down */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2403
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2411
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2419
Marcel Holtmann76bca882009-11-18 00:40:39 +01002420/* Receive frame from HCI drivers */
2421int hci_recv_frame(struct sk_buff *skb)
2422{
2423 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2424 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002425 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002426 kfree_skb(skb);
2427 return -ENXIO;
2428 }
2429
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002430 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002431 bt_cb(skb)->incoming = 1;
2432
2433 /* Time stamp */
2434 __net_timestamp(skb);
2435
Marcel Holtmann76bca882009-11-18 00:40:39 +01002436 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002437 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002438
Marcel Holtmann76bca882009-11-18 00:40:39 +01002439 return 0;
2440}
2441EXPORT_SYMBOL(hci_recv_frame);
2442
/* Incrementally reassemble one HCI packet of @type from driver-supplied
 * bytes, using reassembly slot @index of @hdev.
 *
 * Returns the number of input bytes not yet consumed (>= 0), or a
 * negative errno: -EILSEQ for a bad type/index, -ENOMEM on allocation
 * failure or when the advertised payload would not fit the buffer.
 * A return of 0 with the slot still populated means more bytes are
 * needed to complete the packet.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the largest
		 * payload of this packet type and expect the fixed-size
		 * header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits the allocated tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2551
Marcel Holtmannef222012007-07-11 06:42:04 +02002552int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2553{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302554 int rem = 0;
2555
Marcel Holtmannef222012007-07-11 06:42:04 +02002556 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2557 return -EILSEQ;
2558
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002559 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002560 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302561 if (rem < 0)
2562 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002563
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302564 data += (count - rem);
2565 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002566 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002567
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302568 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002569}
2570EXPORT_SYMBOL(hci_recv_fragment);
2571
/* Reassembly slot reserved for stream (UART-style) input */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream in which each packet is
 * preceded by a one-byte packet-type indicator (H:4 style transports).
 * Returns the number of unconsumed bytes or a negative errno from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the packet type */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continue the packet already in the slot */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607/* ---- Interface to upper protocols ---- */
2608
/* Register an upper-protocol callback set (e.g. L2CAP, SCO).
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2620
/* Unregister a previously registered upper-protocol callback set.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2632
/* Hand one outgoing frame to the driver, first feeding copies to the
 * monitor channel and (in promiscuous mode) to the raw HCI sockets.
 * Consumes @skb; returns the driver's send result or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2660
/* Initialize an HCI request builder bound to @hdev: empty command
 * queue, no pending build error.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2667
/* Commit a built request: splice its queued commands onto the device
 * command queue and kick the command worker.  @complete is attached to
 * the last command so it runs once the whole request has finished.
 * Returns 0, a build error recorded in req->err, or -ENODATA for an
 * empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the request's last command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2699
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002700static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002701 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702{
2703 int len = HCI_COMMAND_HDR_SIZE + plen;
2704 struct hci_command_hdr *hdr;
2705 struct sk_buff *skb;
2706
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002708 if (!skb)
2709 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
2711 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002712 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 hdr->plen = plen;
2714
2715 if (plen)
2716 memcpy(skb_put(skb, plen), param, plen);
2717
2718 BT_DBG("skb len %d", skb->len);
2719
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002720 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002722
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002723 return skb;
2724}
2725
/* Send HCI command */
/* Build a single HCI command and queue it on the device command queue.
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750
/* Queue a command to an asynchronous HCI request */
/* @event, when non-zero, names the HCI event expected to complete this
 * command instead of the usual Command Complete/Status.  On allocation
 * failure the error is recorded in req->err for hci_req_run() to
 * report.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2781
/* Queue a command on @req using the default completion events
 * (event 0 == complete on Command Complete/Status).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2787
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002789void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790{
2791 struct hci_command_hdr *hdr;
2792
2793 if (!hdev->sent_cmd)
2794 return NULL;
2795
2796 hdr = (void *) hdev->sent_cmd->data;
2797
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002798 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 return NULL;
2800
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002801 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802
2803 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2804}
2805
2806/* Send ACL data */
2807static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2808{
2809 struct hci_acl_hdr *hdr;
2810 int len = skb->len;
2811
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002812 skb_push(skb, HCI_ACL_HDR_SIZE);
2813 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002814 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002815 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2816 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817}
2818
/* Queue one ACL frame (plus any fragments on its frag_list) on @queue
 * with ACL headers prepended.  @flags carries the packet boundary and
 * broadcast bits; fragments after the first are re-flagged ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP controllers address channels by the channel's own handle
	 * rather than the connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Remaining fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2877
/* Queue an ACL frame on @chan's data queue and kick the TX worker */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890
/* Send SCO data */
/* Prepend a SCO header, queue the frame on the connection and kick the
 * TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the header (handle + payload length) before the push
	 * changes skb->len.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
2913/* ---- HCI TX task (outgoing data) ---- */
2914
/* HCI Connection scheduler */
/* Pick the ready connection of @type with the fewest in-flight packets
 * and compute its fair TX quota in *quote (free controller buffers
 * divided by the number of ready connections, minimum 1).  Returns
 * NULL with *quote == 0 when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-recently-served connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer budget depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2975
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002976static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977{
2978 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002979 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980
Ville Tervobae1f5d92011-02-10 22:38:53 -03002981 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002983 rcu_read_lock();
2984
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002986 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002987 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002988 BT_ERR("%s killing stalled connection %pMR",
2989 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03002990 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 }
2992 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002993
2994 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995}
2996
/* Pick the best channel to service for link type @type: among the
 * channels whose head packet carries the highest priority, choose the
 * one on the connection with the fewest in-flight packets.  *quote gets
 * the fair TX budget (free buffers / ready connections, minimum 1).
 * Returns NULL when no channel has data queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the search */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the connection
			 * with the least outstanding traffic.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3078
/* Anti-starvation pass: for every channel of link type @type that was
 * not served in the last scheduling round (chan->sent == 0), promote
 * its head packet's priority towards HCI_PRIO_MAX - 1 so it can win a
 * future hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served recently: reset its counter
			 * and leave its priority untouched.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3128
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003129static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3130{
3131 /* Calculate count of blocks used by this packet */
3132 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3133}
3134
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003135static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 if (!test_bit(HCI_RAW, &hdev->flags)) {
3138 /* ACL tx timeout must be longer than maximum
3139 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003140 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003141 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003142 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003144}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145
/* Packet-based ACL scheduler: transmit queued ACL packets while TX
 * credits (hdev->acl_cnt) remain.  Channels are picked by
 * hci_chan_sent(); within a channel we stop as soon as a packet of
 * lower priority than the head is reached.  If anything was sent,
 * starved channels are promoted afterwards.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a controller that stopped returning credits */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			/* hci_send_frame() consumes the skb */
			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Credits were spent this round: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3183
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003184static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003185{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003186 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003187 struct hci_chan *chan;
3188 struct sk_buff *skb;
3189 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003190 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003191
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003192 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003193
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003194 BT_DBG("%s", hdev->name);
3195
3196 if (hdev->dev_type == HCI_AMP)
3197 type = AMP_LINK;
3198 else
3199 type = ACL_LINK;
3200
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003201 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003202 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003203 u32 priority = (skb_peek(&chan->data_q))->priority;
3204 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3205 int blocks;
3206
3207 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003208 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003209
3210 /* Stop if priority has changed */
3211 if (skb->priority < priority)
3212 break;
3213
3214 skb = skb_dequeue(&chan->data_q);
3215
3216 blocks = __get_blocks(hdev, skb);
3217 if (blocks > hdev->block_cnt)
3218 return;
3219
3220 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003221 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003222
3223 hci_send_frame(skb);
3224 hdev->acl_last_tx = jiffies;
3225
3226 hdev->block_cnt -= blocks;
3227 quote -= blocks;
3228
3229 chan->sent += blocks;
3230 chan->conn->sent += blocks;
3231 }
3232 }
3233
3234 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003235 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003236}
3237
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003238static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003239{
3240 BT_DBG("%s", hdev->name);
3241
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003242 /* No ACL link over BR/EDR controller */
3243 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3244 return;
3245
3246 /* No AMP link over AMP controller */
3247 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003248 return;
3249
3250 switch (hdev->flow_ctl_mode) {
3251 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3252 hci_sched_acl_pkt(hdev);
3253 break;
3254
3255 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3256 hci_sched_acl_blk(hdev);
3257 break;
3258 }
3259}
3260
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003262static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263{
3264 struct hci_conn *conn;
3265 struct sk_buff *skb;
3266 int quote;
3267
3268 BT_DBG("%s", hdev->name);
3269
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003270 if (!hci_conn_num(hdev, SCO_LINK))
3271 return;
3272
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3274 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3275 BT_DBG("skb %p len %d", skb, skb->len);
3276 hci_send_frame(skb);
3277
3278 conn->sent++;
3279 if (conn->sent == ~0)
3280 conn->sent = 0;
3281 }
3282 }
3283}
3284
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003285static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003286{
3287 struct hci_conn *conn;
3288 struct sk_buff *skb;
3289 int quote;
3290
3291 BT_DBG("%s", hdev->name);
3292
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003293 if (!hci_conn_num(hdev, ESCO_LINK))
3294 return;
3295
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003296 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3297 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003298 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3299 BT_DBG("skb %p len %d", skb, skb->len);
3300 hci_send_frame(skb);
3301
3302 conn->sent++;
3303 if (conn->sent == ~0)
3304 conn->sent = 0;
3305 }
3306 }
3307}
3308
/* LE scheduler.  LE traffic uses its own credit pool (le_cnt) when the
 * controller reports dedicated LE buffers (le_pkts != 0), and shares
 * the ACL pool otherwise.  Remaining credits are written back to the
 * pool that was used, and starved channels are promoted if anything
 * was sent.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the LE pool when available, the shared ACL pool otherwise */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect activity */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* hci_send_frame() consumes the skb */
			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Credits were spent this round: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3359
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003360static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003362 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 struct sk_buff *skb;
3364
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003365 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003366 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367
Marcel Holtmann52de5992013-09-03 18:08:38 -07003368 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3369 /* Schedule queues and send stuff to HCI driver */
3370 hci_sched_acl(hdev);
3371 hci_sched_sco(hdev);
3372 hci_sched_esco(hdev);
3373 hci_sched_le(hdev);
3374 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003375
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 /* Send next queued raw (unknown type) packet */
3377 while ((skb = skb_dequeue(&hdev->raw_q)))
3378 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379}
3380
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003381/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382
3383/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003384static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385{
3386 struct hci_acl_hdr *hdr = (void *) skb->data;
3387 struct hci_conn *conn;
3388 __u16 handle, flags;
3389
3390 skb_pull(skb, HCI_ACL_HDR_SIZE);
3391
3392 handle = __le16_to_cpu(hdr->handle);
3393 flags = hci_flags(handle);
3394 handle = hci_handle(handle);
3395
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003396 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003397 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
3399 hdev->stat.acl_rx++;
3400
3401 hci_dev_lock(hdev);
3402 conn = hci_conn_hash_lookup_handle(hdev, handle);
3403 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003404
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003406 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003407
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003409 l2cap_recv_acldata(conn, skb, flags);
3410 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003412 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003413 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414 }
3415
3416 kfree_skb(skb);
3417}
3418
3419/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003420static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421{
3422 struct hci_sco_hdr *hdr = (void *) skb->data;
3423 struct hci_conn *conn;
3424 __u16 handle;
3425
3426 skb_pull(skb, HCI_SCO_HDR_SIZE);
3427
3428 handle = __le16_to_cpu(hdr->handle);
3429
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003430 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431
3432 hdev->stat.sco_rx++;
3433
3434 hci_dev_lock(hdev);
3435 conn = hci_conn_hash_lookup_handle(hdev, handle);
3436 hci_dev_unlock(hdev);
3437
3438 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003440 sco_recv_scodata(conn, skb);
3441 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003443 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003444 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 }
3446
3447 kfree_skb(skb);
3448}
3449
Johan Hedberg9238f362013-03-05 20:37:48 +02003450static bool hci_req_is_complete(struct hci_dev *hdev)
3451{
3452 struct sk_buff *skb;
3453
3454 skb = skb_peek(&hdev->cmd_q);
3455 if (!skb)
3456 return true;
3457
3458 return bt_cb(skb)->req.start;
3459}
3460
Johan Hedberg42c6b122013-03-05 20:37:49 +02003461static void hci_resend_last(struct hci_dev *hdev)
3462{
3463 struct hci_command_hdr *sent;
3464 struct sk_buff *skb;
3465 u16 opcode;
3466
3467 if (!hdev->sent_cmd)
3468 return;
3469
3470 sent = (void *) hdev->sent_cmd->data;
3471 opcode = __le16_to_cpu(sent->opcode);
3472 if (opcode == HCI_OP_RESET)
3473 return;
3474
3475 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3476 if (!skb)
3477 return;
3478
3479 skb_queue_head(&hdev->cmd_q, skb);
3480 queue_work(hdev->workqueue, &hdev->cmd_work);
3481}
3482
/* Match a command complete/status event against the request it belongs
 * to and, once the whole request has finished, run its completion
 * callback exactly once.
 *
 * @hdev:   device the event arrived on
 * @opcode: opcode of the command the event refers to
 * @status: HCI status of the command (0 on success)
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request; stop at
	 * (and keep) the first command that starts the next request.
	 * The queue lock must be held with IRQs off because the queue is
	 * also touched from interrupt context.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3548
/* RX work: drain hdev->rx_q and dispatch each packet by type.
 *
 * Every packet is mirrored to the monitor socket, and additionally to
 * raw sockets when promiscuous listeners exist.  In raw or
 * user-channel mode the kernel stack does not process packets at all;
 * during HCI_INIT only event packets are accepted.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw-only or userspace-owned device: drop the packet here */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler takes ownership of the skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3604
/* CMD work: transmit the next queued HCI command when a command credit
 * is available.
 *
 * A clone of the command is kept in hdev->sent_cmd so a later command
 * complete/status event can be paired with it.  The command timer is
 * (re)armed to catch an unresponsive controller, except during reset
 * where it is stopped.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			/* hci_send_frame() consumes the skb */
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry from the workqueue */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003636
Andre Guedes31f79562012-04-24 21:02:53 -03003637u8 bdaddr_to_le(u8 bdaddr_type)
3638{
3639 switch (bdaddr_type) {
3640 case BDADDR_LE_PUBLIC:
3641 return ADDR_LE_DEV_PUBLIC;
3642
3643 default:
3644 /* Fallback to LE Random address type */
3645 return ADDR_LE_DEV_RANDOM;
3646 }
3647}