/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

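/* Synchronous requests park the caller on hdev->req_wait_q with
 * req_status set to HCI_REQ_PEND. The two helpers below publish the
 * outcome and wake the sleeper: one from the normal completion path,
 * the other from the cancellation path.
 */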
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

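/* Take the last received event out of hdev->recv_evt and check that it
 * is what the synchronous command path is waiting for: a specific event
 * code when 'event' is non-zero, otherwise a Command Complete for the
 * given opcode. On any mismatch the skb is freed and ERR_PTR(-ENODATA)
 * is returned.
 */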
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

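/* Send a single HCI command and sleep until its completion event shows
 * up or 'timeout' jiffies pass. Callers are expected to serialize these
 * calls (the other sync helpers in this file do so via hci_req_lock()).
 * An illustrative use, assuming an initialized hdev:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 * The returned skb carries the command's return parameters and must be
 * freed by the caller with kfree_skb().
 */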
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

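/* The request builders below all follow one pattern: they receive an
 * initialized struct hci_request plus an opaque 'opt' value and queue
 * one or more commands with hci_req_add(). A caller then runs the whole
 * batch synchronously, along these (illustrative) lines:
 *
 *      err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                         HCI_INIT_TIMEOUT);
 */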
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

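/* Stage-two setup for BR/EDR capable controllers: read static controller
 * parameters (buffer size, class of device, local name, voice setting),
 * clear any event filters and program a connection accept timeout of
 * 0x7d00 slots (32000 x 0.625 ms, i.e. roughly 20 seconds).
 */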
static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

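/* Pick the best inquiry mode the controller can do: 0x02 for extended
 * inquiry results, 0x01 for inquiry results with RSSI, 0x00 for the
 * standard mode. The manufacturer/revision checks below appear to
 * whitelist firmware known to handle RSSI results despite not
 * advertising the corresponding feature bit.
 */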
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

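/* Controller bring-up runs in up to four stages: stage one (reset plus
 * basic capability reads) applies to every controller type, while
 * stages two to four issue the BR/EDR and LE configuration commands
 * that AMP controllers do not need.
 */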
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

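/* The discovery cache keeps every seen device on the 'all' list, plus
 * two auxiliary lists: 'unknown' for entries whose remote name has not
 * been resolved yet and 'resolve' for entries queued for name
 * resolution.
 */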
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

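/* Merge one inquiry response into the discovery cache, re-sorting the
 * name-resolution list when an entry's RSSI changed. Returns true when
 * the entry's remote name is known, false when it still needs name
 * resolution (or when allocating a new entry failed).
 */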
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

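/* HCIINQUIRY ioctl handler: kick off a fresh inquiry when the cache is
 * stale or IREQ_CACHE_FLUSH is set, wait for it to finish and copy up
 * to 255 cached responses back to user space.
 */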
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we will use a buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

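/* Build LE advertising data into 'ptr' (at most HCI_MAX_AD_LENGTH
 * bytes): an optional flags field, an optional TX power field and as
 * much of the local name as still fits, marked EIR_NAME_SHORT when it
 * had to be truncated. Returns the number of bytes written.
 */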
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

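/* Refresh the controller's advertising data, but only if create_ad()
 * produced something different from what was programmed last time;
 * unchanged data is not sent again.
 */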
void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

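/* Common power-down path used by hci_dev_close() and the power
 * management code: flush pending work and queues, send HCI_Reset when
 * the controller needs it (HCI_QUIRK_RESET_ON_CLOSE) and drop the
 * device back to a clean, closed state.
 */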
1275static int hci_dev_do_close(struct hci_dev *hdev)
1276{
1277 BT_DBG("%s %p", hdev->name, hdev);
1278
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001279 cancel_delayed_work(&hdev->power_off);
1280
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 hci_req_cancel(hdev, ENODEV);
1282 hci_req_lock(hdev);
1283
1284 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001285 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 hci_req_unlock(hdev);
1287 return 0;
1288 }
1289
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001290 /* Flush RX and TX works */
1291 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001292 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001294 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001295 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001296 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001297 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001298 }
1299
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001300 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001301 cancel_delayed_work(&hdev->service_cache);
1302
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001303 cancel_delayed_work_sync(&hdev->le_scan_disable);
1304
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001305 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001306 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001308 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
1310 hci_notify(hdev, HCI_DEV_DOWN);
1311
1312 if (hdev->flush)
1313 hdev->flush(hdev);
1314
1315 /* Reset device */
1316 skb_queue_purge(&hdev->cmd_q);
1317 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001318 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001319 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001321 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 clear_bit(HCI_INIT, &hdev->flags);
1323 }
1324
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001325 /* flush cmd work */
1326 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
1328 /* Drop queues */
1329 skb_queue_purge(&hdev->rx_q);
1330 skb_queue_purge(&hdev->cmd_q);
1331 skb_queue_purge(&hdev->raw_q);
1332
1333 /* Drop last sent command */
1334 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001335 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 kfree_skb(hdev->sent_cmd);
1337 hdev->sent_cmd = NULL;
1338 }
1339
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001340 kfree_skb(hdev->recv_evt);
1341 hdev->recv_evt = NULL;
1342
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 /* After this point our queues are empty
1344 * and no tasks are scheduled. */
1345 hdev->close(hdev);
1346
Johan Hedberg35b973c2013-03-15 17:06:59 -05001347 /* Clear flags */
1348 hdev->flags = 0;
1349 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1350
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001351 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1352 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001353 hci_dev_lock(hdev);
1354 mgmt_powered(hdev, 0);
1355 hci_dev_unlock(hdev);
1356 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001357
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001358 /* Controller radio is available but is currently powered down */
1359 hdev->amp_status = 0;
1360
Johan Hedberge59fda82012-02-22 18:11:53 +02001361 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001362 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001363
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 hci_req_unlock(hdev);
1365
1366 hci_dev_put(hdev);
1367 return 0;
1368}
1369
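/* ioctl back end for HCIDEVDOWN: looks up the device, refuses to act
 * on a controller bound to a user channel, cancels any pending
 * auto-power-off work and then delegates to hci_dev_do_close().
 */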
1370int hci_dev_close(__u16 dev)
1371{
1372 struct hci_dev *hdev;
1373 int err;
1374
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001375 hdev = hci_dev_get(dev);
1376 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001378
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001379 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1380 err = -EBUSY;
1381 goto done;
1382 }
1383
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001384 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1385 cancel_delayed_work(&hdev->power_off);
1386
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001388
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001389done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 hci_dev_put(hdev);
1391 return err;
1392}
1393
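/* ioctl back end for HCIDEVRESET: purges the RX and command queues,
 * flushes the inquiry cache and the connection hash, resets the flow
 * control counters and, unless the device runs in raw mode, issues a
 * synchronous HCI_Reset to the controller.
 */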
1394int hci_dev_reset(__u16 dev)
1395{
1396 struct hci_dev *hdev;
1397 int ret = 0;
1398
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001399 hdev = hci_dev_get(dev);
1400 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 return -ENODEV;
1402
1403 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404
Marcel Holtmann808a0492013-08-26 20:57:58 -07001405 if (!test_bit(HCI_UP, &hdev->flags)) {
1406 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001408 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001410 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1411 ret = -EBUSY;
1412 goto done;
1413 }
1414
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 /* Drop queues */
1416 skb_queue_purge(&hdev->rx_q);
1417 skb_queue_purge(&hdev->cmd_q);
1418
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001419 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001420 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001422 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
1424 if (hdev->flush)
1425 hdev->flush(hdev);
1426
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001427 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001428 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001431 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
1433done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 hci_req_unlock(hdev);
1435 hci_dev_put(hdev);
1436 return ret;
1437}
1438
1439int hci_dev_reset_stat(__u16 dev)
1440{
1441 struct hci_dev *hdev;
1442 int ret = 0;
1443
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001444 hdev = hci_dev_get(dev);
1445 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 return -ENODEV;
1447
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001448 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1449 ret = -EBUSY;
1450 goto done;
1451 }
1452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1454
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001455done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 return ret;
1458}
1459
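/* Dispatcher for the HCISET* ioctls. For HCISETACLMTU and HCISETSCOMTU
 * the 32-bit dev_opt field carries two 16-bit values back to back: the
 * packet count in the first half-word and the MTU in the second, which
 * is what the pointer arithmetic below unpacks.
 */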
1460int hci_dev_cmd(unsigned int cmd, void __user *arg)
1461{
1462 struct hci_dev *hdev;
1463 struct hci_dev_req dr;
1464 int err = 0;
1465
1466 if (copy_from_user(&dr, arg, sizeof(dr)))
1467 return -EFAULT;
1468
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001469 hdev = hci_dev_get(dr.dev_id);
1470 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 return -ENODEV;
1472
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001473 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1474 err = -EBUSY;
1475 goto done;
1476 }
1477
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 switch (cmd) {
1479 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001480 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1481 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 break;
1483
1484 case HCISETENCRYPT:
1485 if (!lmp_encrypt_capable(hdev)) {
1486 err = -EOPNOTSUPP;
1487 break;
1488 }
1489
1490 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1491 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001492 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1493 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 if (err)
1495 break;
1496 }
1497
Johan Hedberg01178cd2013-03-05 20:37:41 +02001498 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1499 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 break;
1501
1502 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001503 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1504 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 break;
1506
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001507 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001508 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1509 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001510 break;
1511
1512 case HCISETLINKMODE:
1513 hdev->link_mode = ((__u16) dr.dev_opt) &
1514 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1515 break;
1516
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 case HCISETPTYPE:
1518 hdev->pkt_type = (__u16) dr.dev_opt;
1519 break;
1520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001522 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1523 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 break;
1525
1526 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001527 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1528 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 break;
1530
1531 default:
1532 err = -EINVAL;
1533 break;
1534 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001535
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001536done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 hci_dev_put(hdev);
1538 return err;
1539}
1540
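/* HCIGETDEVLIST: copies up to dev_num (dev_id, flags) pairs into the
 * caller's buffer. The walk doubles as housekeeping: it cancels
 * pending auto-power-off work and marks controllers that are not
 * under mgmt control as pairable.
 */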
1541int hci_get_dev_list(void __user *arg)
1542{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001543 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 struct hci_dev_list_req *dl;
1545 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 int n = 0, size, err;
1547 __u16 dev_num;
1548
1549 if (get_user(dev_num, (__u16 __user *) arg))
1550 return -EFAULT;
1551
1552 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1553 return -EINVAL;
1554
1555 size = sizeof(*dl) + dev_num * sizeof(*dr);
1556
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001557 dl = kzalloc(size, GFP_KERNEL);
1558 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 return -ENOMEM;
1560
1561 dr = dl->dev_req;
1562
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001563 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001564 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001565 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001566 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001567
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001568 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1569 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 (dr + n)->dev_id = hdev->id;
1572 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001573
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 if (++n >= dev_num)
1575 break;
1576 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001577 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
1579 dl->dev_num = n;
1580 size = sizeof(*dl) + n * sizeof(*dr);
1581
1582 err = copy_to_user(arg, dl, size);
1583 kfree(dl);
1584
1585 return err ? -EFAULT : 0;
1586}
1587
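/* HCIGETDEVINFO: fills in a snapshot of one controller's state. For
 * LE-only controllers the ACL fields are reused to report the LE
 * buffer settings and the SCO fields are zeroed.
 */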
1588int hci_get_dev_info(void __user *arg)
1589{
1590 struct hci_dev *hdev;
1591 struct hci_dev_info di;
1592 int err = 0;
1593
1594 if (copy_from_user(&di, arg, sizeof(di)))
1595 return -EFAULT;
1596
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001597 hdev = hci_dev_get(di.dev_id);
1598 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 return -ENODEV;
1600
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001601 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001602 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001603
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001604 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1605 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001606
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 strcpy(di.name, hdev->name);
1608 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001609 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 di.flags = hdev->flags;
1611 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001612 if (lmp_bredr_capable(hdev)) {
1613 di.acl_mtu = hdev->acl_mtu;
1614 di.acl_pkts = hdev->acl_pkts;
1615 di.sco_mtu = hdev->sco_mtu;
1616 di.sco_pkts = hdev->sco_pkts;
1617 } else {
1618 di.acl_mtu = hdev->le_mtu;
1619 di.acl_pkts = hdev->le_pkts;
1620 di.sco_mtu = 0;
1621 di.sco_pkts = 0;
1622 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 di.link_policy = hdev->link_policy;
1624 di.link_mode = hdev->link_mode;
1625
1626 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1627 memcpy(&di.features, &hdev->features, sizeof(di.features));
1628
1629 if (copy_to_user(arg, &di, sizeof(di)))
1630 err = -EFAULT;
1631
1632 hci_dev_put(hdev);
1633
1634 return err;
1635}
1636
1637/* ---- Interface to HCI drivers ---- */
1638
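/* rfkill callback: blocking the radio simply closes the device
 * (refused while it is bound to a user channel); unblocking is a no-op
 * here, since bringing the device back up is left to user space.
 */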
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001639static int hci_rfkill_set_block(void *data, bool blocked)
1640{
1641 struct hci_dev *hdev = data;
1642
1643 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1644
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001645 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1646 return -EBUSY;
1647
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001648 if (!blocked)
1649 return 0;
1650
1651 hci_dev_do_close(hdev);
1652
1653 return 0;
1654}
1655
1656static const struct rfkill_ops hci_rfkill_ops = {
1657 .set_block = hci_rfkill_set_block,
1658};
1659
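/* Power management work items: hci_power_on() opens the device and,
 * while the auto-off flag is set, schedules hci_power_off() to shut an
 * unused controller down again after HCI_AUTO_OFF_TIMEOUT.
 */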
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001660static void hci_power_on(struct work_struct *work)
1661{
1662 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001663 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001664
1665 BT_DBG("%s", hdev->name);
1666
Johan Hedberg96570ff2013-05-29 09:51:29 +03001667 err = hci_dev_open(hdev->id);
1668 if (err < 0) {
1669 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001670 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001671 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001672
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001673 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001674 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1675 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001676
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001677 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001678 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001679}
1680
1681static void hci_power_off(struct work_struct *work)
1682{
Johan Hedberg32435532011-11-07 22:16:04 +02001683 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001684 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001685
1686 BT_DBG("%s", hdev->name);
1687
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001688 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001689}
1690
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001691static void hci_discov_off(struct work_struct *work)
1692{
1693 struct hci_dev *hdev;
1694 u8 scan = SCAN_PAGE;
1695
1696 hdev = container_of(work, struct hci_dev, discov_off.work);
1697
1698 BT_DBG("%s", hdev->name);
1699
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001700 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001701
1702 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1703
1704 hdev->discov_timeout = 0;
1705
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001706 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001707}
1708
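/* The *_clear() helpers below free the per-device UUID, link key and
 * long term key lists; callers hold hdev->lock, as hci_unregister_dev()
 * does when tearing a controller down.
 */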
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001709int hci_uuids_clear(struct hci_dev *hdev)
1710{
Johan Hedberg48210022013-01-27 00:31:28 +02001711 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001712
Johan Hedberg48210022013-01-27 00:31:28 +02001713 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1714 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001715 kfree(uuid);
1716 }
1717
1718 return 0;
1719}
1720
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001721int hci_link_keys_clear(struct hci_dev *hdev)
1722{
1723 struct list_head *p, *n;
1724
1725 list_for_each_safe(p, n, &hdev->link_keys) {
1726 struct link_key *key;
1727
1728 key = list_entry(p, struct link_key, list);
1729
1730 list_del(p);
1731 kfree(key);
1732 }
1733
1734 return 0;
1735}
1736
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001737int hci_smp_ltks_clear(struct hci_dev *hdev)
1738{
1739 struct smp_ltk *k, *tmp;
1740
1741 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1742 list_del(&k->list);
1743 kfree(k);
1744 }
1745
1746 return 0;
1747}
1748
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001749struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1750{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001751 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001752
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001753 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001754 if (bacmp(bdaddr, &k->bdaddr) == 0)
1755 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001756
1757 return NULL;
1758}
1759
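/* Decide whether a new link key should be stored persistently. Legacy
 * (pre-SSP) keys are always kept, debug keys never are, and the
 * remaining cases follow from the bonding requirements both sides
 * advertised during pairing.
 */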
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301760static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001761 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001762{
1763 /* Legacy key */
1764 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301765 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001766
1767 /* Debug keys are insecure so don't store them persistently */
1768 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301769 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001770
1771 /* Changed combination key and there's no previous one */
1772 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301773 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001774
1775 /* Security mode 3 case */
1776 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301777 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001778
1779 /* Neither local nor remote side had no-bonding as requirement */
1780 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301781 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001782
1783 /* Local side had dedicated bonding as requirement */
1784 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301785 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001786
1787 /* Remote side had dedicated bonding as requirement */
1788 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301789 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001790
1791 /* If none of the above criteria match, then don't store the key
1792 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301793 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001794}
1795
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001796struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001797{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001798 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001799
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001800 list_for_each_entry(k, &hdev->long_term_keys, list) {
1801 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001802 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001803 continue;
1804
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001805 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001806 }
1807
1808 return NULL;
1809}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001810
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001811struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001812 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001813{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001814 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001815
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001816 list_for_each_entry(k, &hdev->long_term_keys, list)
1817 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001818 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001819 return k;
1820
1821 return NULL;
1822}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001823
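/* Store (or update) a BR/EDR link key and notify the management
 * interface. The persistence verdict from hci_persistent_key() also
 * decides, via conn->flush_key, whether the controller's copy is
 * discarded when the connection goes down.
 */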
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001824int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001825 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001826{
1827 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301828 u8 old_key_type;
1829 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001830
1831 old_key = hci_find_link_key(hdev, bdaddr);
1832 if (old_key) {
1833 old_key_type = old_key->type;
1834 key = old_key;
1835 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001836 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001837 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1838 if (!key)
1839 return -ENOMEM;
1840 list_add(&key->list, &hdev->link_keys);
1841 }
1842
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001843 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001844
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001845 /* Some buggy controller combinations generate a changed
1846 * combination key for legacy pairing even when there's no
1847 * previous key */
1848 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001849 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001850 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001851 if (conn)
1852 conn->key_type = type;
1853 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001854
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001855 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001856 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001857 key->pin_len = pin_len;
1858
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001859 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001860 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001861 else
1862 key->type = type;
1863
Johan Hedberg4df378a2011-04-28 11:29:03 -07001864 if (!new_key)
1865 return 0;
1866
1867 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1868
Johan Hedberg744cf192011-11-08 20:40:14 +02001869 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001870
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301871 if (conn)
1872 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001873
1874 return 0;
1875}
1876
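/* Store (or update) an SMP key. Only proper long term keys are
 * reported to the management interface; short term keys (HCI_SMP_STK)
 * stay in the local list and are never exposed.
 */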
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001877int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001878 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001879 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001880{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001881 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001882
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001883 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1884 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001885
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001886 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1887 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001888 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001889 else {
1890 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001891 if (!key)
1892 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001893 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001894 }
1895
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001896 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001897 key->bdaddr_type = addr_type;
1898 memcpy(key->val, tk, sizeof(key->val));
1899 key->authenticated = authenticated;
1900 key->ediv = ediv;
1901 key->enc_size = enc_size;
1902 key->type = type;
1903 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001904
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001905 if (!new_key)
1906 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001907
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001908 if (type & HCI_SMP_LTK)
1909 mgmt_new_ltk(hdev, key, 1);
1910
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001911 return 0;
1912}
1913
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001914int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1915{
1916 struct link_key *key;
1917
1918 key = hci_find_link_key(hdev, bdaddr);
1919 if (!key)
1920 return -ENOENT;
1921
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001922 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001923
1924 list_del(&key->list);
1925 kfree(key);
1926
1927 return 0;
1928}
1929
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001930int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1931{
1932 struct smp_ltk *k, *tmp;
1933
1934 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1935 if (bacmp(bdaddr, &k->bdaddr))
1936 continue;
1937
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001938 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001939
1940 list_del(&k->list);
1941 kfree(k);
1942 }
1943
1944 return 0;
1945}
1946
Ville Tervo6bd32322011-02-16 16:32:41 +02001947/* HCI command timer function */
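/* Fires when a sent command gets no Command Complete/Status event
 * within HCI_CMD_TIMEOUT. It only logs the stalled opcode and resets
 * the command flow-control credit so the queue can make progress; it
 * does not reset the controller.
 */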
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001948static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001949{
1950 struct hci_dev *hdev = (void *) arg;
1951
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001952 if (hdev->sent_cmd) {
1953 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1954 u16 opcode = __le16_to_cpu(sent->opcode);
1955
1956 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1957 } else {
1958 BT_ERR("%s command tx timeout", hdev->name);
1959 }
1960
Ville Tervo6bd32322011-02-16 16:32:41 +02001961 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001962 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001963}
1964
Szymon Janc2763eda2011-03-22 13:12:22 +01001965struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001966 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001967{
1968 struct oob_data *data;
1969
1970 list_for_each_entry(data, &hdev->remote_oob_data, list)
1971 if (bacmp(bdaddr, &data->bdaddr) == 0)
1972 return data;
1973
1974 return NULL;
1975}
1976
1977int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1978{
1979 struct oob_data *data;
1980
1981 data = hci_find_remote_oob_data(hdev, bdaddr);
1982 if (!data)
1983 return -ENOENT;
1984
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001985 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001986
1987 list_del(&data->list);
1988 kfree(data);
1989
1990 return 0;
1991}
1992
1993int hci_remote_oob_data_clear(struct hci_dev *hdev)
1994{
1995 struct oob_data *data, *n;
1996
1997 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1998 list_del(&data->list);
1999 kfree(data);
2000 }
2001
2002 return 0;
2003}
2004
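/* Cache the hash/randomizer pair received out of band for a remote
 * device so a later Secure Simple Pairing exchange can use it; an
 * existing entry for the same address is overwritten in place.
 */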
2005int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002006 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002007{
2008 struct oob_data *data;
2009
2010 data = hci_find_remote_oob_data(hdev, bdaddr);
2011
2012 if (!data) {
2013 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2014 if (!data)
2015 return -ENOMEM;
2016
2017 bacpy(&data->bdaddr, bdaddr);
2018 list_add(&data->list, &hdev->remote_oob_data);
2019 }
2020
2021 memcpy(data->hash, hash, sizeof(data->hash));
2022 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2023
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002024 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002025
2026 return 0;
2027}
2028
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002029struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002030{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002031 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002032
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002033 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002034 if (bacmp(bdaddr, &b->bdaddr) == 0)
2035 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002036
2037 return NULL;
2038}
2039
2040int hci_blacklist_clear(struct hci_dev *hdev)
2041{
2042 struct list_head *p, *n;
2043
2044 list_for_each_safe(p, n, &hdev->blacklist) {
2045 struct bdaddr_list *b;
2046
2047 b = list_entry(p, struct bdaddr_list, list);
2048
2049 list_del(p);
2050 kfree(b);
2051 }
2052
2053 return 0;
2054}
2055
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002056int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002057{
2058 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002059
2060 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2061 return -EBADF;
2062
Antti Julku5e762442011-08-25 16:48:02 +03002063 if (hci_blacklist_lookup(hdev, bdaddr))
2064 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002065
2066 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002067 if (!entry)
2068 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002069
2070 bacpy(&entry->bdaddr, bdaddr);
2071
2072 list_add(&entry->list, &hdev->blacklist);
2073
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002074 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002075}
2076
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002077int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002078{
2079 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002080
Szymon Janc1ec918c2011-11-16 09:32:21 +01002081 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002082 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002083
2084 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002085 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002086 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002087
2088 list_del(&entry->list);
2089 kfree(entry);
2090
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002091 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002092}
2093
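/* LE scan teardown is asynchronous: le_scan_disable_work() below sends
 * the Set Scan Enable (disable) command, and its completion handler
 * either ends discovery (LE-only) or chains a classic inquiry for
 * interleaved discovery, with inquiry_complete() as the final step.
 */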
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002094static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002095{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002096 if (status) {
2097 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002098
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002099 hci_dev_lock(hdev);
2100 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2101 hci_dev_unlock(hdev);
2102 return;
2103 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002104}
2105
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002106static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002107{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002108 /* General inquiry access code (GIAC) */
2109 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2110 struct hci_request req;
2111 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002112 int err;
2113
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002114 if (status) {
2115 BT_ERR("Failed to disable LE scanning: status %d", status);
2116 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002117 }
2118
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002119 switch (hdev->discovery.type) {
2120 case DISCOV_TYPE_LE:
2121 hci_dev_lock(hdev);
2122 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2123 hci_dev_unlock(hdev);
2124 break;
2125
2126 case DISCOV_TYPE_INTERLEAVED:
2127 hci_req_init(&req, hdev);
2128
2129 memset(&cp, 0, sizeof(cp));
2130 memcpy(&cp.lap, lap, sizeof(cp.lap));
2131 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2132 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2133
2134 hci_dev_lock(hdev);
2135
2136 hci_inquiry_cache_flush(hdev);
2137
2138 err = hci_req_run(&req, inquiry_complete);
2139 if (err) {
2140 BT_ERR("Inquiry request failed: err %d", err);
2141 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2142 }
2143
2144 hci_dev_unlock(hdev);
2145 break;
2146 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002147}
2148
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002149static void le_scan_disable_work(struct work_struct *work)
2150{
2151 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002152 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002153 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002154 struct hci_request req;
2155 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002156
2157 BT_DBG("%s", hdev->name);
2158
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002159 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002160
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002161 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002162 cp.enable = LE_SCAN_DISABLE;
2163 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002164
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002165 err = hci_req_run(&req, le_scan_disable_work_complete);
2166 if (err)
2167 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002168}
2169
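/* Typical driver usage of the alloc/register API that follows (a
 * sketch for orientation, not taken from this file: my_open, my_close
 * and my_send are hypothetical driver callbacks, error handling
 * trimmed):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */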
David Herrmann9be0dab2012-04-22 14:39:57 +02002170/* Alloc HCI device */
2171struct hci_dev *hci_alloc_dev(void)
2172{
2173 struct hci_dev *hdev;
2174
2175 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2176 if (!hdev)
2177 return NULL;
2178
David Herrmannb1b813d2012-04-22 14:39:58 +02002179 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2180 hdev->esco_type = (ESCO_HV1);
2181 hdev->link_mode = (HCI_LM_ACCEPT);
2182 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002183 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2184 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002185
David Herrmannb1b813d2012-04-22 14:39:58 +02002186 hdev->sniff_max_interval = 800;
2187 hdev->sniff_min_interval = 80;
2188
2189 mutex_init(&hdev->lock);
2190 mutex_init(&hdev->req_lock);
2191
2192 INIT_LIST_HEAD(&hdev->mgmt_pending);
2193 INIT_LIST_HEAD(&hdev->blacklist);
2194 INIT_LIST_HEAD(&hdev->uuids);
2195 INIT_LIST_HEAD(&hdev->link_keys);
2196 INIT_LIST_HEAD(&hdev->long_term_keys);
2197 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002198 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002199
2200 INIT_WORK(&hdev->rx_work, hci_rx_work);
2201 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2202 INIT_WORK(&hdev->tx_work, hci_tx_work);
2203 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002204
David Herrmannb1b813d2012-04-22 14:39:58 +02002205 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2206 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2207 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2208
David Herrmannb1b813d2012-04-22 14:39:58 +02002209 skb_queue_head_init(&hdev->rx_q);
2210 skb_queue_head_init(&hdev->cmd_q);
2211 skb_queue_head_init(&hdev->raw_q);
2212
2213 init_waitqueue_head(&hdev->req_wait_q);
2214
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002215 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002216
David Herrmannb1b813d2012-04-22 14:39:58 +02002217 hci_init_sysfs(hdev);
2218 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002219
2220 return hdev;
2221}
2222EXPORT_SYMBOL(hci_alloc_dev);
2223
2224/* Free HCI device */
2225void hci_free_dev(struct hci_dev *hdev)
2226{
David Herrmann9be0dab2012-04-22 14:39:57 +02002227 /* will free via device release */
2228 put_device(&hdev->dev);
2229}
2230EXPORT_SYMBOL(hci_free_dev);
2231
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232/* Register HCI device */
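/* Allocates the controller index, the per-device workqueues and the
 * rfkill hook, then schedules the initial power-on through the request
 * workqueue.
 */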
2233int hci_register_dev(struct hci_dev *hdev)
2234{
David Herrmannb1b813d2012-04-22 14:39:58 +02002235 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
David Herrmann010666a2012-01-07 15:47:07 +01002237 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 return -EINVAL;
2239
Mat Martineau08add512011-11-02 16:18:36 -07002240 /* Do not allow HCI_AMP devices to register at index 0,
2241 * so the index can be used as the AMP controller ID.
2242 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002243 switch (hdev->dev_type) {
2244 case HCI_BREDR:
2245 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2246 break;
2247 case HCI_AMP:
2248 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2249 break;
2250 default:
2251 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002253
Sasha Levin3df92b32012-05-27 22:36:56 +02002254 if (id < 0)
2255 return id;
2256
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 sprintf(hdev->name, "hci%d", id);
2258 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002259
2260 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2261
Kees Cookd8537542013-07-03 15:04:57 -07002262 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2263 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002264 if (!hdev->workqueue) {
2265 error = -ENOMEM;
2266 goto err;
2267 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002268
Kees Cookd8537542013-07-03 15:04:57 -07002269 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2270 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002271 if (!hdev->req_workqueue) {
2272 destroy_workqueue(hdev->workqueue);
2273 error = -ENOMEM;
2274 goto err;
2275 }
2276
David Herrmann33ca9542011-10-08 14:58:49 +02002277 error = hci_add_sysfs(hdev);
2278 if (error < 0)
2279 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002281 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002282 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2283 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002284 if (hdev->rfkill) {
2285 if (rfkill_register(hdev->rfkill) < 0) {
2286 rfkill_destroy(hdev->rfkill);
2287 hdev->rfkill = NULL;
2288 }
2289 }
2290
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002291 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002292
2293 if (hdev->dev_type != HCI_AMP)
2294 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2295
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002296 write_lock(&hci_dev_list_lock);
2297 list_add(&hdev->list, &hci_dev_list);
2298 write_unlock(&hci_dev_list_lock);
2299
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002301 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
Johan Hedberg19202572013-01-14 22:33:51 +02002303 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002306
David Herrmann33ca9542011-10-08 14:58:49 +02002307err_wqueue:
2308 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002309 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002310err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002311 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002312
David Herrmann33ca9542011-10-08 14:58:49 +02002313 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314}
2315EXPORT_SYMBOL(hci_register_dev);
2316
2317/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002318void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319{
Sasha Levin3df92b32012-05-27 22:36:56 +02002320 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002321
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002322 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
Johan Hovold94324962012-03-15 14:48:41 +01002324 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2325
Sasha Levin3df92b32012-05-27 22:36:56 +02002326 id = hdev->id;
2327
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002328 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002330 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
2332 hci_dev_do_close(hdev);
2333
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302334 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002335 kfree_skb(hdev->reassembly[i]);
2336
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002337 cancel_work_sync(&hdev->power_on);
2338
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002339 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002340 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002341 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002342 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002343 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002344 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002345
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002346 /* mgmt_index_removed should take care of emptying the
2347 * pending list */
2348 BUG_ON(!list_empty(&hdev->mgmt_pending));
2349
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 hci_notify(hdev, HCI_DEV_UNREG);
2351
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002352 if (hdev->rfkill) {
2353 rfkill_unregister(hdev->rfkill);
2354 rfkill_destroy(hdev->rfkill);
2355 }
2356
David Herrmannce242972011-10-08 14:58:48 +02002357 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002358
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002359 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002360 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002361
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002362 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002363 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002364 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002365 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002366 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002367 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002368 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002369
David Herrmanndc946bd2012-01-07 15:47:24 +01002370 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002371
2372 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373}
2374EXPORT_SYMBOL(hci_unregister_dev);
2375
2376/* Suspend HCI device */
2377int hci_suspend_dev(struct hci_dev *hdev)
2378{
2379 hci_notify(hdev, HCI_DEV_SUSPEND);
2380 return 0;
2381}
2382EXPORT_SYMBOL(hci_suspend_dev);
2383
2384/* Resume HCI device */
2385int hci_resume_dev(struct hci_dev *hdev)
2386{
2387 hci_notify(hdev, HCI_DEV_RESUME);
2388 return 0;
2389}
2390EXPORT_SYMBOL(hci_resume_dev);
2391
Marcel Holtmann76bca882009-11-18 00:40:39 +01002392/* Receive frame from HCI drivers */
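/* Called by drivers from their interrupt/completion paths: the skb is
 * timestamped, tagged as incoming and queued for the RX work item
 * instead of being processed in the caller's context.
 */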
2393int hci_recv_frame(struct sk_buff *skb)
2394{
2395 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2396 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002397 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002398 kfree_skb(skb);
2399 return -ENXIO;
2400 }
2401
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002402 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002403 bt_cb(skb)->incoming = 1;
2404
2405 /* Time stamp */
2406 __net_timestamp(skb);
2407
Marcel Holtmann76bca882009-11-18 00:40:39 +01002408 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002409 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002410
Marcel Holtmann76bca882009-11-18 00:40:39 +01002411 return 0;
2412}
2413EXPORT_SYMBOL(hci_recv_frame);
2414
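/* Incremental reassembler shared by hci_recv_fragment() and
 * hci_recv_stream_fragment(): bytes accumulate in
 * hdev->reassembly[index], first up to the fixed header, then up to
 * the length announced in that header; each completed frame is handed
 * to hci_recv_frame(). Returns the number of input bytes left over
 * after a complete frame, or a negative error.
 */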
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302415static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002416 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302417{
2418 int len = 0;
2419 int hlen = 0;
2420 int remain = count;
2421 struct sk_buff *skb;
2422 struct bt_skb_cb *scb;
2423
2424 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002425 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302426 return -EILSEQ;
2427
2428 skb = hdev->reassembly[index];
2429
2430 if (!skb) {
2431 switch (type) {
2432 case HCI_ACLDATA_PKT:
2433 len = HCI_MAX_FRAME_SIZE;
2434 hlen = HCI_ACL_HDR_SIZE;
2435 break;
2436 case HCI_EVENT_PKT:
2437 len = HCI_MAX_EVENT_SIZE;
2438 hlen = HCI_EVENT_HDR_SIZE;
2439 break;
2440 case HCI_SCODATA_PKT:
2441 len = HCI_MAX_SCO_SIZE;
2442 hlen = HCI_SCO_HDR_SIZE;
2443 break;
2444 }
2445
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002446 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302447 if (!skb)
2448 return -ENOMEM;
2449
2450 scb = (void *) skb->cb;
2451 scb->expect = hlen;
2452 scb->pkt_type = type;
2453
2454 skb->dev = (void *) hdev;
2455 hdev->reassembly[index] = skb;
2456 }
2457
2458 while (count) {
2459 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002460 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302461
2462 memcpy(skb_put(skb, len), data, len);
2463
2464 count -= len;
2465 data += len;
2466 scb->expect -= len;
2467 remain = count;
2468
2469 switch (type) {
2470 case HCI_EVENT_PKT:
2471 if (skb->len == HCI_EVENT_HDR_SIZE) {
2472 struct hci_event_hdr *h = hci_event_hdr(skb);
2473 scb->expect = h->plen;
2474
2475 if (skb_tailroom(skb) < scb->expect) {
2476 kfree_skb(skb);
2477 hdev->reassembly[index] = NULL;
2478 return -ENOMEM;
2479 }
2480 }
2481 break;
2482
2483 case HCI_ACLDATA_PKT:
2484 if (skb->len == HCI_ACL_HDR_SIZE) {
2485 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2486 scb->expect = __le16_to_cpu(h->dlen);
2487
2488 if (skb_tailroom(skb) < scb->expect) {
2489 kfree_skb(skb);
2490 hdev->reassembly[index] = NULL;
2491 return -ENOMEM;
2492 }
2493 }
2494 break;
2495
2496 case HCI_SCODATA_PKT:
2497 if (skb->len == HCI_SCO_HDR_SIZE) {
2498 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2499 scb->expect = h->dlen;
2500
2501 if (skb_tailroom(skb) < scb->expect) {
2502 kfree_skb(skb);
2503 hdev->reassembly[index] = NULL;
2504 return -ENOMEM;
2505 }
2506 }
2507 break;
2508 }
2509
2510 if (scb->expect == 0) {
2511 /* Complete frame */
2512
2513 bt_cb(skb)->pkt_type = type;
2514 hci_recv_frame(skb);
2515
2516 hdev->reassembly[index] = NULL;
2517 return remain;
2518 }
2519 }
2520
2521 return remain;
2522}
2523
Marcel Holtmannef222012007-07-11 06:42:04 +02002524int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2525{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302526 int rem = 0;
2527
Marcel Holtmannef222012007-07-11 06:42:04 +02002528 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2529 return -EILSEQ;
2530
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002531 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002532 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302533 if (rem < 0)
2534 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002535
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302536 data += (count - rem);
2537 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002538 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002539
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302540 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002541}
2542EXPORT_SYMBOL(hci_recv_fragment);
2543
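/* Stream transports (UART-style) carry no packet boundaries of their
 * own: the first byte of each frame is the H:4 packet type indicator,
 * which is peeled off here before reusing hci_reassembly() with a
 * single dedicated slot.
 */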
Suraj Sumangala99811512010-07-14 13:02:19 +05302544#define STREAM_REASSEMBLY 0
2545
2546int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2547{
2548 int type;
2549 int rem = 0;
2550
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002551 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302552 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2553
2554 if (!skb) {
2555 struct { char type; } *pkt;
2556
2557 /* Start of the frame */
2558 pkt = data;
2559 type = pkt->type;
2560
2561 data++;
2562 count--;
2563 } else
2564 type = bt_cb(skb)->pkt_type;
2565
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002566 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002567 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302568 if (rem < 0)
2569 return rem;
2570
2571 data += (count - rem);
2572 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002573 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302574
2575 return rem;
2576}
2577EXPORT_SYMBOL(hci_recv_stream_fragment);
2578
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579/* ---- Interface to upper protocols ---- */
2580
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581int hci_register_cb(struct hci_cb *cb)
2582{
2583 BT_DBG("%p name %s", cb, cb->name);
2584
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002585 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002587 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
2589 return 0;
2590}
2591EXPORT_SYMBOL(hci_register_cb);
2592
2593int hci_unregister_cb(struct hci_cb *cb)
2594{
2595 BT_DBG("%p name %s", cb, cb->name);
2596
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002597 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002599 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600
2601 return 0;
2602}
2603EXPORT_SYMBOL(hci_unregister_cb);
2604
2605static int hci_send_frame(struct sk_buff *skb)
2606{
2607 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2608
2609 if (!hdev) {
2610 kfree_skb(skb);
2611 return -ENODEV;
2612 }
2613
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002614 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002616 /* Time stamp */
2617 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002619 /* Send copy to monitor */
2620 hci_send_to_monitor(hdev, skb);
2621
2622 if (atomic_read(&hdev->promisc)) {
2623 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002624 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 }
2626
2627 /* Get rid of skb owner, prior to sending to the driver. */
2628 skb_orphan(skb);
2629
2630 return hdev->send(skb);
2631}
2632
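/* Asynchronous HCI requests: commands collect on a private req->cmd_q
 * and are only spliced onto hdev->cmd_q when hci_req_run() is called,
 * so a multi-command sequence enters the transmit path atomically with
 * one completion callback attached to its last command.
 */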
Johan Hedberg3119ae92013-03-05 20:37:44 +02002633void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2634{
2635 skb_queue_head_init(&req->cmd_q);
2636 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002637 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002638}
2639
2640int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2641{
2642 struct hci_dev *hdev = req->hdev;
2643 struct sk_buff *skb;
2644 unsigned long flags;
2645
2646 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2647
Andre Guedes5d73e032013-03-08 11:20:16 -03002648	/* If an error occurred during request building, remove all HCI
2649 * commands queued on the HCI request queue.
2650 */
2651 if (req->err) {
2652 skb_queue_purge(&req->cmd_q);
2653 return req->err;
2654 }
2655
Johan Hedberg3119ae92013-03-05 20:37:44 +02002656 /* Do not allow empty requests */
2657 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002658 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002659
2660 skb = skb_peek_tail(&req->cmd_q);
2661 bt_cb(skb)->req.complete = complete;
2662
2663 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2664 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2665 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2666
2667 queue_work(hdev->workqueue, &hdev->cmd_work);
2668
2669 return 0;
2670}
2671
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002672static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002673 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674{
2675 int len = HCI_COMMAND_HDR_SIZE + plen;
2676 struct hci_command_hdr *hdr;
2677 struct sk_buff *skb;
2678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002680 if (!skb)
2681 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682
2683 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002684 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 hdr->plen = plen;
2686
2687 if (plen)
2688 memcpy(skb_put(skb, plen), param, plen);
2689
2690 BT_DBG("skb len %d", skb->len);
2691
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002692 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002694
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002695 return skb;
2696}
2697
2698/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002699int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2700 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002701{
2702 struct sk_buff *skb;
2703
2704 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2705
2706 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2707 if (!skb) {
2708 BT_ERR("%s no memory for command", hdev->name);
2709 return -ENOMEM;
2710 }
2711
Johan Hedberg11714b32013-03-05 20:37:47 +02002712	/* Stand-alone HCI commands must be flagged as
2713 * single-command requests.
2714 */
2715 bt_cb(skb)->req.start = true;
2716
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002718 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719
2720 return 0;
2721}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
Johan Hedberg71c76a12013-03-05 20:37:46 +02002723/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002724void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2725 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002726{
2727 struct hci_dev *hdev = req->hdev;
2728 struct sk_buff *skb;
2729
2730 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2731
Andre Guedes34739c12013-03-08 11:20:18 -03002732	/* If an error occurred during request building, there is no point in
2733 * queueing the HCI command. We can simply return.
2734 */
2735 if (req->err)
2736 return;
2737
Johan Hedberg71c76a12013-03-05 20:37:46 +02002738 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2739 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002740 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2741 hdev->name, opcode);
2742 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002743 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002744 }
2745
2746 if (skb_queue_empty(&req->cmd_q))
2747 bt_cb(skb)->req.start = true;
2748
Johan Hedberg02350a72013-04-03 21:50:29 +03002749 bt_cb(skb)->req.event = event;
2750
Johan Hedberg71c76a12013-03-05 20:37:46 +02002751 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002752}
2753
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002754void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2755 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002756{
2757 hci_req_add_ev(req, opcode, plen, param, 0);
2758}
2759
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002761void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762{
2763 struct hci_command_hdr *hdr;
2764
2765 if (!hdev->sent_cmd)
2766 return NULL;
2767
2768 hdr = (void *) hdev->sent_cmd->data;
2769
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002770 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 return NULL;
2772
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002773 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774
2775 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2776}
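/* Illustrative usage sketch (not part of the original source): event
 * handlers use this helper to recover the parameters of the command that
 * is still in flight, e.g. for a remote name request:
 *
 *	struct hci_cp_remote_name_req *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
 *	if (!cp)
 *		return;
 *	BT_DBG("pending name request for %pMR", &cp->bdaddr);
 *
 * The returned pointer aliases hdev->sent_cmd and is only valid until the
 * next command is sent.
 */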
2777
2778/* Send ACL data */
2779static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2780{
2781 struct hci_acl_hdr *hdr;
2782 int len = skb->len;
2783
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002784 skb_push(skb, HCI_ACL_HDR_SIZE);
2785 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002786 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002787 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2788 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789}
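/* Worked example (illustrative): hci_handle_pack() folds the 2-bit
 * packet-boundary and 2-bit broadcast flags into the top nibble of the
 * 16-bit handle field, so for handle 0x002a with flags ACL_START (0x02):
 *
 *	hci_handle_pack(0x002a, 0x02) == (0x002a & 0x0fff) | (0x02 << 12)
 *	                              == 0x202a
 *
 * The receive path splits the field back apart with hci_handle() and
 * hci_flags(), as hci_acldata_packet() does below.
 */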
2790
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002791static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002792 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002794 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 struct hci_dev *hdev = conn->hdev;
2796 struct sk_buff *list;
2797
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002798 skb->len = skb_headlen(skb);
2799 skb->data_len = 0;
2800
2801 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002802
2803 switch (hdev->dev_type) {
2804 case HCI_BREDR:
2805 hci_add_acl_hdr(skb, conn->handle, flags);
2806 break;
2807 case HCI_AMP:
2808 hci_add_acl_hdr(skb, chan->handle, flags);
2809 break;
2810 default:
2811 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2812 return;
2813 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002814
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002815 list = skb_shinfo(skb)->frag_list;
2816 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817		/* Non-fragmented */
2818 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2819
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002820 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 } else {
2822 /* Fragmented */
2823 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2824
2825 skb_shinfo(skb)->frag_list = NULL;
2826
2827 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002828 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002830 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002831
2832 flags &= ~ACL_START;
2833 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 do {
2835 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002836
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002838 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002839 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840
2841 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2842
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002843 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 } while (list);
2845
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002846 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002848}
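/* Note (illustrative summary, not part of the original source): for a
 * fragmented skb the head fragment keeps the caller's flags (typically
 * ACL_START or ACL_START_NO_FLUSH), while every entry of the detached
 * frag_list is re-tagged with ACL_CONT before being queued, so the
 * controller sees one boundary-marked packet followed by continuations.
 */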
2849
2850void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2851{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002852 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002853
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002854 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002855
2856 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002857
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002858 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002860 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862
2863/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002864void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865{
2866 struct hci_dev *hdev = conn->hdev;
2867 struct hci_sco_hdr hdr;
2868
2869 BT_DBG("%s len %d", hdev->name, skb->len);
2870
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002871 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 hdr.dlen = skb->len;
2873
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002874 skb_push(skb, HCI_SCO_HDR_SIZE);
2875 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002876 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
2878 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002879 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002880
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002882 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884
2885/* ---- HCI TX task (outgoing data) ---- */
2886
2887/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002888static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2889 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890{
2891 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002892 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002893 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002895	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002897
2898 rcu_read_lock();
2899
2900 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002901 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002903
2904 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2905 continue;
2906
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 num++;
2908
2909 if (c->sent < min) {
2910 min = c->sent;
2911 conn = c;
2912 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002913
2914 if (hci_conn_num(hdev, type) == num)
2915 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 }
2917
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002918 rcu_read_unlock();
2919
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002921 int cnt, q;
2922
2923 switch (conn->type) {
2924 case ACL_LINK:
2925 cnt = hdev->acl_cnt;
2926 break;
2927 case SCO_LINK:
2928 case ESCO_LINK:
2929 cnt = hdev->sco_cnt;
2930 break;
2931 case LE_LINK:
2932 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2933 break;
2934 default:
2935 cnt = 0;
2936 BT_ERR("Unknown link type");
2937 }
2938
2939 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 *quote = q ? q : 1;
2941 } else
2942 *quote = 0;
2943
2944 BT_DBG("conn %p quote %d", conn, *quote);
2945 return conn;
2946}
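/* Worked example (illustrative): with three ACL connections that all have
 * queued data and hdev->acl_cnt == 8 free controller buffers, the
 * connection with the smallest c->sent wins and receives
 *
 *	quote = 8 / 3 = 2
 *
 * packets this round. If cnt / num rounds down to zero, the quote is
 * clamped to 1 so the selected link is never starved completely.
 */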
2947
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002948static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949{
2950 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002951 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952
Ville Tervobae1f5d92011-02-10 22:38:53 -03002953 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002955 rcu_read_lock();
2956
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002958 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002959 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002960 BT_ERR("%s killing stalled connection %pMR",
2961 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03002962 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 }
2964 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002965
2966 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967}
2968
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002969static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2970 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002971{
2972 struct hci_conn_hash *h = &hdev->conn_hash;
2973 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002974 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002975 struct hci_conn *conn;
2976 int cnt, q, conn_num = 0;
2977
2978 BT_DBG("%s", hdev->name);
2979
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002980 rcu_read_lock();
2981
2982 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002983 struct hci_chan *tmp;
2984
2985 if (conn->type != type)
2986 continue;
2987
2988 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2989 continue;
2990
2991 conn_num++;
2992
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002993 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002994 struct sk_buff *skb;
2995
2996 if (skb_queue_empty(&tmp->data_q))
2997 continue;
2998
2999 skb = skb_peek(&tmp->data_q);
3000 if (skb->priority < cur_prio)
3001 continue;
3002
3003 if (skb->priority > cur_prio) {
3004 num = 0;
3005 min = ~0;
3006 cur_prio = skb->priority;
3007 }
3008
3009 num++;
3010
3011 if (conn->sent < min) {
3012 min = conn->sent;
3013 chan = tmp;
3014 }
3015 }
3016
3017 if (hci_conn_num(hdev, type) == conn_num)
3018 break;
3019 }
3020
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003021 rcu_read_unlock();
3022
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003023 if (!chan)
3024 return NULL;
3025
3026 switch (chan->conn->type) {
3027 case ACL_LINK:
3028 cnt = hdev->acl_cnt;
3029 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003030 case AMP_LINK:
3031 cnt = hdev->block_cnt;
3032 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003033 case SCO_LINK:
3034 case ESCO_LINK:
3035 cnt = hdev->sco_cnt;
3036 break;
3037 case LE_LINK:
3038 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3039 break;
3040 default:
3041 cnt = 0;
3042 BT_ERR("Unknown link type");
3043 }
3044
3045 q = cnt / num;
3046 *quote = q ? q : 1;
3047 BT_DBG("chan %p quote %d", chan, *quote);
3048 return chan;
3049}
3050
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003051static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3052{
3053 struct hci_conn_hash *h = &hdev->conn_hash;
3054 struct hci_conn *conn;
3055 int num = 0;
3056
3057 BT_DBG("%s", hdev->name);
3058
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003059 rcu_read_lock();
3060
3061 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003062 struct hci_chan *chan;
3063
3064 if (conn->type != type)
3065 continue;
3066
3067 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3068 continue;
3069
3070 num++;
3071
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003072 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003073 struct sk_buff *skb;
3074
3075 if (chan->sent) {
3076 chan->sent = 0;
3077 continue;
3078 }
3079
3080 if (skb_queue_empty(&chan->data_q))
3081 continue;
3082
3083 skb = skb_peek(&chan->data_q);
3084 if (skb->priority >= HCI_PRIO_MAX - 1)
3085 continue;
3086
3087 skb->priority = HCI_PRIO_MAX - 1;
3088
3089 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003090 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003091 }
3092
3093 if (hci_conn_num(hdev, type) == num)
3094 break;
3095 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003096
3097 rcu_read_unlock();
3098
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003099}
3100
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003101static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3102{
3103 /* Calculate count of blocks used by this packet */
3104 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3105}
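/* Worked example (illustrative): with hdev->block_len == 256 and a
 * 1028-byte skb (4-byte ACL header plus 1024 bytes of payload):
 *
 *	DIV_ROUND_UP(1028 - HCI_ACL_HDR_SIZE, 256) == DIV_ROUND_UP(1024, 256)
 *	                                           == 4 blocks
 *
 * By this accounting only the payload counts against the controller's
 * block budget; the header travels for free.
 */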
3106
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003107static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 if (!test_bit(HCI_RAW, &hdev->flags)) {
3110 /* ACL tx timeout must be longer than maximum
3111 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003112 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003113 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003114 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003116}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003118static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003119{
3120 unsigned int cnt = hdev->acl_cnt;
3121 struct hci_chan *chan;
3122 struct sk_buff *skb;
3123 int quote;
3124
3125 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003126
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003127 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003128 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003129 u32 priority = (skb_peek(&chan->data_q))->priority;
3130 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003131 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003132 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003133
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003134 /* Stop if priority has changed */
3135 if (skb->priority < priority)
3136 break;
3137
3138 skb = skb_dequeue(&chan->data_q);
3139
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003140 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003141 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003142
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 hci_send_frame(skb);
3144 hdev->acl_last_tx = jiffies;
3145
3146 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003147 chan->sent++;
3148 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149 }
3150 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003151
3152 if (cnt != hdev->acl_cnt)
3153 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154}
3155
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003156static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003157{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003158 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003159 struct hci_chan *chan;
3160 struct sk_buff *skb;
3161 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003162 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003163
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003164 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003165
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003166 BT_DBG("%s", hdev->name);
3167
3168 if (hdev->dev_type == HCI_AMP)
3169 type = AMP_LINK;
3170 else
3171 type = ACL_LINK;
3172
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003173 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003174 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003175 u32 priority = (skb_peek(&chan->data_q))->priority;
3176 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3177 int blocks;
3178
3179 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003180 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003181
3182 /* Stop if priority has changed */
3183 if (skb->priority < priority)
3184 break;
3185
3186 skb = skb_dequeue(&chan->data_q);
3187
3188 blocks = __get_blocks(hdev, skb);
3189 if (blocks > hdev->block_cnt)
3190 return;
3191
3192 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003193 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003194
3195 hci_send_frame(skb);
3196 hdev->acl_last_tx = jiffies;
3197
3198 hdev->block_cnt -= blocks;
3199 quote -= blocks;
3200
3201 chan->sent += blocks;
3202 chan->conn->sent += blocks;
3203 }
3204 }
3205
3206 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003207 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003208}
3209
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003210static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003211{
3212 BT_DBG("%s", hdev->name);
3213
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003214 /* No ACL link over BR/EDR controller */
3215 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3216 return;
3217
3218 /* No AMP link over AMP controller */
3219 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003220 return;
3221
3222 switch (hdev->flow_ctl_mode) {
3223 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3224 hci_sched_acl_pkt(hdev);
3225 break;
3226
3227 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3228 hci_sched_acl_blk(hdev);
3229 break;
3230 }
3231}
3232
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003234static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235{
3236 struct hci_conn *conn;
3237 struct sk_buff *skb;
3238 int quote;
3239
3240 BT_DBG("%s", hdev->name);
3241
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003242 if (!hci_conn_num(hdev, SCO_LINK))
3243 return;
3244
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3246 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3247 BT_DBG("skb %p len %d", skb, skb->len);
3248 hci_send_frame(skb);
3249
3250 conn->sent++;
3251 if (conn->sent == ~0)
3252 conn->sent = 0;
3253 }
3254 }
3255}
3256
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003257static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003258{
3259 struct hci_conn *conn;
3260 struct sk_buff *skb;
3261 int quote;
3262
3263 BT_DBG("%s", hdev->name);
3264
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003265 if (!hci_conn_num(hdev, ESCO_LINK))
3266 return;
3267
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003268 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3269 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003270 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3271 BT_DBG("skb %p len %d", skb, skb->len);
3272 hci_send_frame(skb);
3273
3274 conn->sent++;
3275 if (conn->sent == ~0)
3276 conn->sent = 0;
3277 }
3278 }
3279}
3280
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003281static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003282{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003283 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003284 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003285 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003286
3287 BT_DBG("%s", hdev->name);
3288
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003289 if (!hci_conn_num(hdev, LE_LINK))
3290 return;
3291
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003292 if (!test_bit(HCI_RAW, &hdev->flags)) {
3293 /* LE tx timeout must be longer than maximum
3294 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003295 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003296 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003297 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003298 }
3299
3300 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003301 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003302 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003303 u32 priority = (skb_peek(&chan->data_q))->priority;
3304 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003305 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003306 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003307
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003308 /* Stop if priority has changed */
3309 if (skb->priority < priority)
3310 break;
3311
3312 skb = skb_dequeue(&chan->data_q);
3313
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003314 hci_send_frame(skb);
3315 hdev->le_last_tx = jiffies;
3316
3317 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003318 chan->sent++;
3319 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003320 }
3321 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003322
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003323 if (hdev->le_pkts)
3324 hdev->le_cnt = cnt;
3325 else
3326 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003327
3328 if (cnt != tmp)
3329 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003330}
3331
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003332static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003334 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 struct sk_buff *skb;
3336
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003337 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003338 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
Marcel Holtmann52de5992013-09-03 18:08:38 -07003340 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3341 /* Schedule queues and send stuff to HCI driver */
3342 hci_sched_acl(hdev);
3343 hci_sched_sco(hdev);
3344 hci_sched_esco(hdev);
3345 hci_sched_le(hdev);
3346 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003347
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 /* Send next queued raw (unknown type) packet */
3349 while ((skb = skb_dequeue(&hdev->raw_q)))
3350 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351}
3352
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003353/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
3355/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003356static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357{
3358 struct hci_acl_hdr *hdr = (void *) skb->data;
3359 struct hci_conn *conn;
3360 __u16 handle, flags;
3361
3362 skb_pull(skb, HCI_ACL_HDR_SIZE);
3363
3364 handle = __le16_to_cpu(hdr->handle);
3365 flags = hci_flags(handle);
3366 handle = hci_handle(handle);
3367
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003368 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003369 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370
3371 hdev->stat.acl_rx++;
3372
3373 hci_dev_lock(hdev);
3374 conn = hci_conn_hash_lookup_handle(hdev, handle);
3375 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003376
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003378 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003379
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003381 l2cap_recv_acldata(conn, skb, flags);
3382 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003384 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003385 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386 }
3387
3388 kfree_skb(skb);
3389}
3390
3391/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003392static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393{
3394 struct hci_sco_hdr *hdr = (void *) skb->data;
3395 struct hci_conn *conn;
3396 __u16 handle;
3397
3398 skb_pull(skb, HCI_SCO_HDR_SIZE);
3399
3400 handle = __le16_to_cpu(hdr->handle);
3401
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003402 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403
3404 hdev->stat.sco_rx++;
3405
3406 hci_dev_lock(hdev);
3407 conn = hci_conn_hash_lookup_handle(hdev, handle);
3408 hci_dev_unlock(hdev);
3409
3410 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003412 sco_recv_scodata(conn, skb);
3413 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003415 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003416 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 }
3418
3419 kfree_skb(skb);
3420}
3421
Johan Hedberg9238f362013-03-05 20:37:48 +02003422static bool hci_req_is_complete(struct hci_dev *hdev)
3423{
3424 struct sk_buff *skb;
3425
3426 skb = skb_peek(&hdev->cmd_q);
3427 if (!skb)
3428 return true;
3429
3430 return bt_cb(skb)->req.start;
3431}
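/* Illustrative sketch (not part of the original source) of the cmd_q
 * layout this test relies on: each request marks only its first command
 * with req.start, so with two requests pending the queue looks like
 *
 *	head -> [start=1 cmdA1] [start=0 cmdA2] [start=1 cmdB1] ...
 *
 * If the command at the head carries the start marker, the previous
 * request has been fully drained and is therefore complete.
 */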
3432
Johan Hedberg42c6b122013-03-05 20:37:49 +02003433static void hci_resend_last(struct hci_dev *hdev)
3434{
3435 struct hci_command_hdr *sent;
3436 struct sk_buff *skb;
3437 u16 opcode;
3438
3439 if (!hdev->sent_cmd)
3440 return;
3441
3442 sent = (void *) hdev->sent_cmd->data;
3443 opcode = __le16_to_cpu(sent->opcode);
3444 if (opcode == HCI_OP_RESET)
3445 return;
3446
3447 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3448 if (!skb)
3449 return;
3450
3451 skb_queue_head(&hdev->cmd_q, skb);
3452 queue_work(hdev->workqueue, &hdev->cmd_work);
3453}
3454
Johan Hedberg9238f362013-03-05 20:37:48 +02003455void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3456{
3457 hci_req_complete_t req_complete = NULL;
3458 struct sk_buff *skb;
3459 unsigned long flags;
3460
3461 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3462
Johan Hedberg42c6b122013-03-05 20:37:49 +02003463 /* If the completed command doesn't match the last one that was
3464	 * sent, we need to handle it specially.
Johan Hedberg9238f362013-03-05 20:37:48 +02003465 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003466 if (!hci_sent_cmd_data(hdev, opcode)) {
3467	/* Some CSR-based controllers generate a spontaneous
3468 * reset complete event during init and any pending
3469 * command will never be completed. In such a case we
3470 * need to resend whatever was the last sent
3471 * command.
3472 */
3473 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3474 hci_resend_last(hdev);
3475
Johan Hedberg9238f362013-03-05 20:37:48 +02003476 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003477 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003478
3479 /* If the command succeeded and there's still more commands in
3480 * this request the request is not yet complete.
3481 */
3482 if (!status && !hci_req_is_complete(hdev))
3483 return;
3484
3485	/* If this was the last command in a request, the complete
3486 * callback would be found in hdev->sent_cmd instead of the
3487 * command queue (hdev->cmd_q).
3488 */
3489 if (hdev->sent_cmd) {
3490 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003491
3492 if (req_complete) {
3493 /* We must set the complete callback to NULL to
3494 * avoid calling the callback more than once if
3495 * this function gets called again.
3496 */
3497 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3498
Johan Hedberg9238f362013-03-05 20:37:48 +02003499 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003500 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003501 }
3502
3503 /* Remove all pending commands belonging to this request */
3504 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3505 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3506 if (bt_cb(skb)->req.start) {
3507 __skb_queue_head(&hdev->cmd_q, skb);
3508 break;
3509 }
3510
3511 req_complete = bt_cb(skb)->req.complete;
3512 kfree_skb(skb);
3513 }
3514 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3515
3516call_complete:
3517 if (req_complete)
3518 req_complete(hdev, status);
3519}
3520
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003521static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003523 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 struct sk_buff *skb;
3525
3526 BT_DBG("%s", hdev->name);
3527
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003529 /* Send copy to monitor */
3530 hci_send_to_monitor(hdev, skb);
3531
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 if (atomic_read(&hdev->promisc)) {
3533 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003534 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535 }
3536
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003537 if (test_bit(HCI_RAW, &hdev->flags) ||
3538 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 kfree_skb(skb);
3540 continue;
3541 }
3542
3543 if (test_bit(HCI_INIT, &hdev->flags)) {
3544			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003545 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 case HCI_ACLDATA_PKT:
3547 case HCI_SCODATA_PKT:
3548 kfree_skb(skb);
3549 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 }
3552
3553 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003554 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003556 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 hci_event_packet(hdev, skb);
3558 break;
3559
3560 case HCI_ACLDATA_PKT:
3561 BT_DBG("%s ACL data packet", hdev->name);
3562 hci_acldata_packet(hdev, skb);
3563 break;
3564
3565 case HCI_SCODATA_PKT:
3566 BT_DBG("%s SCO data packet", hdev->name);
3567 hci_scodata_packet(hdev, skb);
3568 break;
3569
3570 default:
3571 kfree_skb(skb);
3572 break;
3573 }
3574 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575}
3576
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003577static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003579 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580 struct sk_buff *skb;
3581
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003582 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3583 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003586 if (atomic_read(&hdev->cmd_cnt)) {
3587 skb = skb_dequeue(&hdev->cmd_q);
3588 if (!skb)
3589 return;
3590
Wei Yongjun7585b972009-02-25 18:29:52 +08003591 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003593 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003594 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 atomic_dec(&hdev->cmd_cnt);
3596 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003597 if (test_bit(HCI_RESET, &hdev->flags))
3598 del_timer(&hdev->cmd_timer);
3599 else
3600 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003601 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 } else {
3603 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003604 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 }
3606 }
3607}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003608
Andre Guedes31f79562012-04-24 21:02:53 -03003609u8 bdaddr_to_le(u8 bdaddr_type)
3610{
3611 switch (bdaddr_type) {
3612 case BDADDR_LE_PUBLIC:
3613 return ADDR_LE_DEV_PUBLIC;
3614
3615 default:
3616 /* Fallback to LE Random address type */
3617 return ADDR_LE_DEV_RANDOM;
3618 }
3619}