blob: ea542e07b2e95b47535ce9249b2c4da88cbfe800 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
/* HCI device list: every registered controller is linked here.
 * Readers take hci_dev_list_lock for read; registration/removal
 * takes it for write.
 */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list: protocol callbacks registered against the core. */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering: IDA allocator handing out unique hdev indices. */
static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device event (e.g. register/unregister) to the HCI socket
 * layer so monitoring sockets can see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg42c6b122013-03-05 20:37:49 +020060static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Johan Hedberg42c6b122013-03-05 20:37:49 +020062 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
/* Take ownership of the last received event (hdev->recv_evt) and verify
 * it matches what the synchronous command sender expects.
 *
 * If @event is non-zero, the skb is returned iff its event code equals
 * @event. Otherwise the skb must be a Command Complete event whose
 * opcode matches @opcode. On any mismatch the skb is freed and
 * ERR_PTR(-ENODATA) is returned; on success the caller owns the skb
 * (with the matched headers already pulled off).
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the lock so the RX path cannot race us. */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event code rather than a
	 * Command Complete for the opcode.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	/* ev->opcode is little-endian on the wire. */
	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command and sleep until it completes, the request
 * is cancelled, a signal arrives, or @timeout (in jiffies) expires.
 *
 * @event: if non-zero, completion is signalled by this event code
 *         instead of a Command Complete (passed to hci_req_add_ev()).
 *
 * Returns the completion event skb (caller must free) or an ERR_PTR.
 * Must be called with hdev->req_lock held (callers serialize requests).
 *
 * NOTE(review): on signal_pending() we return -EINTR while
 * hdev->req_status is still HCI_REQ_PEND — a late completion will then
 * be consumed by hci_req_sync_complete() against no waiter. Looks
 * intentional for this era of the code; confirm before changing.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Mark pending before running so the completion callback sees it. */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status byte; map to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() for commands that
 * complete with a plain Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
/* Execute request and wait for completion.
 *
 * @func builds the request (a sequence of hci_req_add() calls) using
 * the opaque @opt cookie. Sleeps until hci_req_sync_complete() fires,
 * a signal arrives, or @timeout (jiffies) expires. Caller must hold
 * hdev->req_lock (see hci_req_sync()).
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Set pending before building/running so the completion callback
	 * (which may fire immediately) sees a pending request.
	 */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result is an HCI status code; convert to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result is already a positive errno value. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
/* Public entry for synchronous requests: checks the device is up and
 * takes hdev->req_lock so requests are serialized per device, then
 * delegates to __hci_req_sync().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
Johan Hedberg42c6b122013-03-05 20:37:49 +0200280static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200282 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283
284 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads every controller must answer.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-1 init for AMP controllers: block-based flow control and the
 * AMP-specific info/data-block reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
316
/* First stage of controller initialization: optional reset followed by
 * type-specific identity queries (BR/EDR vs AMP).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver asked for reset-on-close instead
	 * (HCI_QUIRK_RESET_ON_CLOSE).
	 */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
341
/* Stage-2 BR/EDR setup: read controller parameters and set sane
 * defaults (clear event filters, connection accept timeout).
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters; these commands only exist from
	 * Bluetooth 1.2 onward.
	 */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
373
/* Stage-2 LE setup: read the LE controller capabilities and, for
 * LE-only controllers, mark LE as enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
Johan Hedberg42c6b122013-03-05 20:37:49 +0200426static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200427{
428 u8 mode;
429
Johan Hedberg42c6b122013-03-05 20:37:49 +0200430 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200431
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433}
434
/* Build and queue the Set Event Mask (and, for LE controllers, LE Set
 * Event Mask) commands, enabling only events the controller's feature
 * bits say it can generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	/* Redundant with the bredr branch above, but harmless: the bit is
	 * OR'ed either way when the feature is present.
	 */
	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* events[] is reused here for the 8-byte LE event mask. */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
515
/* Second stage of controller initialization: transport-specific setup,
 * event mask, and feature-dependent configuration commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data with an
			 * all-zero Write EIR command.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
569
Johan Hedberg42c6b122013-03-05 20:37:49 +0200570static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200571{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200572 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200573 struct hci_cp_write_def_link_policy cp;
574 u16 link_policy = 0;
575
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
584
585 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200587}
588
/* Sync the controller's LE host support setting with the HCI_LE_ENABLED
 * flag, sending Write LE Host Supported only when it would change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		/* Simultaneous LE + BR/EDR only if the controller can. */
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command if the desired state differs from the
	 * controller's current host-LE setting.
	 */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
609
/* Third stage of controller initialization: stored-link-key cleanup,
 * link policy, LE host support, and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy only if listed in supported commands. */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
650
/* Fourth stage of controller initialization: optional CSB-related reads. */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Check for Synchronization Train support (features page 2) */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
659
/* Run the staged controller bring-up: init1 for everyone, then init2-4
 * for BR/EDR-type (i.e. non-AMP) controllers. Returns 0 or a negative
 * errno from the first failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
685
/* Request builder: write the scan enable setting (opt carries the
 * inquiry/page scan bits).
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
695
/* Request builder: write the authentication enable setting (opt is the
 * Auth_Enable value).
 */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
705
/* Request builder: write the encryption mode setting (opt is the
 * Encryption_Enable value).
 */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
715
/* Request builder: write the default link policy (opt carries the
 * policy bitmask, converted to little-endian for the wire).
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
725
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900726/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 * Device is held on return. */
728struct hci_dev *hci_dev_get(int index)
729{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200730 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731
732 BT_DBG("%d", index);
733
734 if (index < 0)
735 return NULL;
736
737 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200738 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 if (d->id == index) {
740 hdev = hci_dev_hold(d);
741 break;
742 }
743 }
744 read_unlock(&hci_dev_list_lock);
745 return hdev;
746}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747
748/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200749
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200750bool hci_discovery_active(struct hci_dev *hdev)
751{
752 struct discovery_state *discov = &hdev->discovery;
753
Andre Guedes6fbe1952012-02-03 17:47:58 -0300754 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300755 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300756 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200757 return true;
758
Andre Guedes6fbe1952012-02-03 17:47:58 -0300759 default:
760 return false;
761 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200762}
763
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges userspace cares about.
 *
 * @hdev:  controller whose discovery state changes
 * @state: new DISCOVERY_* state
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions generate no events */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so userspace was never told it started and
		 * must not be told it stopped.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* Discovery really started: notify userspace */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
789
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300790void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791{
Johan Hedberg30883512012-01-04 14:16:21 +0200792 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200793 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700794
Johan Hedberg561aafb2012-01-04 13:31:59 +0200795 list_for_each_entry_safe(p, n, &cache->all, all) {
796 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200797 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200799
800 INIT_LIST_HEAD(&cache->unknown);
801 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802}
803
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300804struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
805 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806{
Johan Hedberg30883512012-01-04 14:16:21 +0200807 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808 struct inquiry_entry *e;
809
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300810 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811
Johan Hedberg561aafb2012-01-04 13:31:59 +0200812 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200814 return e;
815 }
816
817 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818}
819
Johan Hedberg561aafb2012-01-04 13:31:59 +0200820struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300821 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200822{
Johan Hedberg30883512012-01-04 14:16:21 +0200823 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200824 struct inquiry_entry *e;
825
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300826 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200827
828 list_for_each_entry(e, &cache->unknown, list) {
829 if (!bacmp(&e->data.bdaddr, bdaddr))
830 return e;
831 }
832
833 return NULL;
834}
835
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200836struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300837 bdaddr_t *bdaddr,
838 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200839{
840 struct discovery_state *cache = &hdev->discovery;
841 struct inquiry_entry *e;
842
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300843 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200844
845 list_for_each_entry(e, &cache->resolve, list) {
846 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
847 return e;
848 if (!bacmp(&e->data.bdaddr, bdaddr))
849 return e;
850 }
851
852 return NULL;
853}
854
/* Re-insert @ie into the resolve list so it stays sorted by signal
 * strength (strongest RSSI, i.e. smallest |rssi|, first), keeping
 * entries whose name resolution is already pending ahead of it.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Take the entry off the list before finding its new slot */
	list_del(&ie->list);

	/* Walk until we hit the first non-pending entry with a weaker
	 * or equal signal; 'pos' trails behind as the insertion point.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry that should precede ie */
	list_add(&ie->list, pos);
}
873
/* Add or refresh an inquiry cache entry for a discovered device.
 *
 * @hdev:       controller that received the inquiry result
 * @data:       parsed inquiry data for the remote device
 * @name_known: true if the remote name is already known
 * @ssp:        out parameter, set to the device's SSP capability
 *              (may be NULL)
 *
 * Returns true when the entry's name is known (no name request
 * needed), false when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Stale OOB pairing data for this address is no longer valid */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* An earlier result may have reported SSP support even
		 * if this one does not — keep the sticky true value.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while awaiting name resolution: re-sort
		 * the resolve list so stronger devices go first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and take it off
	 * whichever sub-list (unknown/resolve) it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
931
932static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
933{
Johan Hedberg30883512012-01-04 14:16:21 +0200934 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935 struct inquiry_info *info = (struct inquiry_info *) buf;
936 struct inquiry_entry *e;
937 int copied = 0;
938
Johan Hedberg561aafb2012-01-04 13:31:59 +0200939 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200941
942 if (copied >= num)
943 break;
944
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 bacpy(&info->bdaddr, &data->bdaddr);
946 info->pscan_rep_mode = data->pscan_rep_mode;
947 info->pscan_period_mode = data->pscan_period_mode;
948 info->pscan_mode = data->pscan_mode;
949 memcpy(info->dev_class, data->dev_class, 3);
950 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200951
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200953 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 }
955
956 BT_DBG("cache %p, copied %d", cache, copied);
957 return copied;
958}
959
Johan Hedberg42c6b122013-03-05 20:37:49 +0200960static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961{
962 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200963 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964 struct hci_cp_inquiry cp;
965
966 BT_DBG("%s", hdev->name);
967
968 if (test_bit(HCI_INQUIRY, &hdev->flags))
969 return;
970
971 /* Start Inquiry */
972 memcpy(&cp.lap, &ir->lap, 3);
973 cp.length = ir->length;
974 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200975 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976}
977
/* Action callback for wait_on_bit(): sleep until the HCI_INQUIRY bit
 * is rechecked, reporting non-zero if a signal interrupted the wait.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984int hci_inquiry(void __user *arg)
985{
986 __u8 __user *ptr = arg;
987 struct hci_inquiry_req ir;
988 struct hci_dev *hdev;
989 int err = 0, do_inquiry = 0, max_rsp;
990 long timeo;
991 __u8 *buf;
992
993 if (copy_from_user(&ir, ptr, sizeof(ir)))
994 return -EFAULT;
995
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200996 hdev = hci_dev_get(ir.dev_id);
997 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998 return -ENODEV;
999
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001000 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1001 err = -EBUSY;
1002 goto done;
1003 }
1004
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001005 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001006 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001007 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001008 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009 do_inquiry = 1;
1010 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001011 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
Marcel Holtmann04837f62006-07-03 10:02:33 +02001013 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001014
1015 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001016 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1017 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001018 if (err < 0)
1019 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001020
1021 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1022 * cleared). If it is interrupted by a signal, return -EINTR.
1023 */
1024 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1025 TASK_INTERRUPTIBLE))
1026 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001027 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001029 /* for unlimited number of responses we will use buffer with
1030 * 255 entries
1031 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1033
1034 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1035 * copy it to the user space.
1036 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001037 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001038 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039 err = -ENOMEM;
1040 goto done;
1041 }
1042
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001043 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001045 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046
1047 BT_DBG("num_rsp %d", ir.num_rsp);
1048
1049 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1050 ptr += sizeof(ir);
1051 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001052 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001054 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 err = -EFAULT;
1056
1057 kfree(buf);
1058
1059done:
1060 hci_dev_put(hdev);
1061 return err;
1062}
1063
/* Build LE advertising data into @ptr (at most HCI_MAX_AD_LENGTH
 * bytes): a flags field, the TX power level when valid, and the local
 * name (shortened if it doesn't fit). Each element is encoded as
 * {length, EIR type, payload}. Returns the total bytes written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Only emit a flags element when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;		/* element length (type + payload) */
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Remaining room minus the 2-byte element header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncated names are tagged as "shortened" per EIR rules */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
1121
/* Queue an LE Set Advertising Data command when the generated
 * advertising payload differs from what the controller already has.
 * No-op for controllers without LE support or when nothing changed.
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	/* Zero the whole parameter block: unused tail bytes of cp.data
	 * are transmitted too and must be deterministic.
	 */
	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Skip the command if the controller already has this data */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Mirror the new payload in the local cache */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1146
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147/* ---- HCI ioctl helpers ---- */
1148
/* Bring an HCI device up: open the transport, run vendor setup and
 * the HCI init sequence, and announce the device to the stack.
 *
 * @dev: device index
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init error). On init failure the transport
 * is fully torn down again before returning.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	/* Serialise against other synchronous request users */
	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Refuse to power on while rfkill blocks the radio */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the underlying transport (USB, UART, ...) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* One command credit until the controller reports its limits */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only on first-time power on */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the HCI init sequence */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		/* Success: hold a reference for the UP state and tell
		 * mgmt userspace the device is powered (unless still in
		 * setup, owned by a user channel, or not mgmt-visible).
		 */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1242
/* Power down an HCI device: flush all pending work and queues,
 * optionally reset the controller, close the transport and notify
 * mgmt userspace. Safe to call on an already-down device (returns 0).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A queued auto-power-off is now redundant */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout dies with the device */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() handles NULL, so no guard is needed */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Tell mgmt we powered off, unless auto-off already did */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up */
	hci_dev_put(hdev);
	return 0;
}
1337
/* HCIDEVDOWN ioctl handler: power down the device with the given
 * index. Refused with -EBUSY while a user channel owns the device.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* Explicit close supersedes any pending auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1361
/* HCIDEVRESET ioctl handler: flush queues, caches and connections,
 * then issue an HCI Reset to the controller (unless running raw).
 *
 * Returns -ENETDOWN if the device is not up, -EBUSY for user-channel
 * devices, otherwise the result of the reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the single command credit and clear flow counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1406
1407int hci_dev_reset_stat(__u16 dev)
1408{
1409 struct hci_dev *hdev;
1410 int ret = 0;
1411
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001412 hdev = hci_dev_get(dev);
1413 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 return -ENODEV;
1415
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001416 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1417 ret = -EBUSY;
1418 goto done;
1419 }
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1422
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001423done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 return ret;
1426}
1427
/* Dispatcher for the legacy HCISET* ioctls.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, HCISETSCAN, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Some commands issue synchronous HCI requests, others just update
 * host-side fields. Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* User-channel devices are controlled exclusively via that channel */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Host-side setting only; no command goes to the radio */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits, packet count low */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1508
/* HCIGETDEVLIST ioctl handler: copy the list of registered device
 * ids and flag words to userspace.
 *
 * @arg: userspace struct hci_dev_list_req, with dev_num filled in by
 *       the caller as the capacity of its dev_req array.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace is clearly using the device: keep it on */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) interface implies pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1555
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * device id supplied by user space.
 *
 * For controllers without BR/EDR support the ACL fields report the LE
 * buffer parameters and the SCO fields are zeroed.
 *
 * Returns 0 on success or a negative error code.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as use: abort a pending
	 * automatic power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects devices to be pairable
	 * by default */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffers in ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1604
1605/* ---- Interface to HCI drivers ---- */
1606
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001607static int hci_rfkill_set_block(void *data, bool blocked)
1608{
1609 struct hci_dev *hdev = data;
1610
1611 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1612
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001613 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1614 return -EBUSY;
1615
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001616 if (!blocked)
1617 return 0;
1618
1619 hci_dev_do_close(hdev);
1620
1621 return 0;
1622}
1623
/* rfkill integration: only the block transition is acted upon */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1627
/* Deferred power-on handler (hdev->power_on work item).
 *
 * Opens the device; an open failure is reported to the management
 * interface.  Devices powered on automatically (HCI_AUTO_OFF) get a
 * delayed power-off scheduled, and the first successful power-on
 * completes setup by announcing the controller index.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* Auto-powered devices are shut down again unless claimed
	 * within HCI_AUTO_OFF_TIMEOUT */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	/* Leaving the setup phase: tell the management interface the
	 * controller is ready */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1648
1649static void hci_power_off(struct work_struct *work)
1650{
Johan Hedberg32435532011-11-07 22:16:04 +02001651 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001652 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001653
1654 BT_DBG("%s", hdev->name);
1655
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001656 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001657}
1658
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001659static void hci_discov_off(struct work_struct *work)
1660{
1661 struct hci_dev *hdev;
1662 u8 scan = SCAN_PAGE;
1663
1664 hdev = container_of(work, struct hci_dev, discov_off.work);
1665
1666 BT_DBG("%s", hdev->name);
1667
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001668 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001669
1670 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1671
1672 hdev->discov_timeout = 0;
1673
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001674 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001675}
1676
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001677int hci_uuids_clear(struct hci_dev *hdev)
1678{
Johan Hedberg48210022013-01-27 00:31:28 +02001679 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001680
Johan Hedberg48210022013-01-27 00:31:28 +02001681 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1682 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001683 kfree(uuid);
1684 }
1685
1686 return 0;
1687}
1688
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001689int hci_link_keys_clear(struct hci_dev *hdev)
1690{
1691 struct list_head *p, *n;
1692
1693 list_for_each_safe(p, n, &hdev->link_keys) {
1694 struct link_key *key;
1695
1696 key = list_entry(p, struct link_key, list);
1697
1698 list_del(p);
1699 kfree(key);
1700 }
1701
1702 return 0;
1703}
1704
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001705int hci_smp_ltks_clear(struct hci_dev *hdev)
1706{
1707 struct smp_ltk *k, *tmp;
1708
1709 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1710 list_del(&k->list);
1711 kfree(k);
1712 }
1713
1714 return 0;
1715}
1716
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001717struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1718{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001719 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001720
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001721 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001722 if (bacmp(bdaddr, &k->bdaddr) == 0)
1723 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001724
1725 return NULL;
1726}
1727
/* Decide whether a newly created link key should be stored
 * persistently or only kept for the duration of the connection.
 *
 * @conn may be NULL (key created without a tracked connection,
 * i.e. the security mode 3 case).  @old_key_type is 0xff when no
 * previous key existed.
 *
 * Returns true when the key should be stored persistently.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1763
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001764struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001765{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001766 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001767
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001768 list_for_each_entry(k, &hdev->long_term_keys, list) {
1769 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001770 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001771 continue;
1772
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001773 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001774 }
1775
1776 return NULL;
1777}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001778
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001779struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001780 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001781{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001782 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001783
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001784 list_for_each_entry(k, &hdev->long_term_keys, list)
1785 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001786 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001787 return k;
1788
1789 return NULL;
1790}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001791
/* Store (or update) the link key for a remote device.
 *
 * @conn may be NULL when the key was created without a tracked
 * connection.  @new_key distinguishes a freshly created key from one
 * merely being reloaded; only new keys are reported to the management
 * interface.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key inherits the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1844
/* Store (or update) an SMP key (STK or LTK) for a remote device.
 *
 * Keys that are neither STK nor LTK are silently ignored.  Only new
 * long term keys are reported to the management interface; STKs are
 * short-lived and never announced.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys are reported to userspace */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1881
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001882int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1883{
1884 struct link_key *key;
1885
1886 key = hci_find_link_key(hdev, bdaddr);
1887 if (!key)
1888 return -ENOENT;
1889
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001890 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001891
1892 list_del(&key->list);
1893 kfree(key);
1894
1895 return 0;
1896}
1897
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001898int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1899{
1900 struct smp_ltk *k, *tmp;
1901
1902 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1903 if (bacmp(bdaddr, &k->bdaddr))
1904 continue;
1905
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001906 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001907
1908 list_del(&k->list);
1909 kfree(k);
1910 }
1911
1912 return 0;
1913}
1914
Ville Tervo6bd32322011-02-16 16:32:41 +02001915/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001916static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001917{
1918 struct hci_dev *hdev = (void *) arg;
1919
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001920 if (hdev->sent_cmd) {
1921 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1922 u16 opcode = __le16_to_cpu(sent->opcode);
1923
1924 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1925 } else {
1926 BT_ERR("%s command tx timeout", hdev->name);
1927 }
1928
Ville Tervo6bd32322011-02-16 16:32:41 +02001929 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001930 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001931}
1932
Szymon Janc2763eda2011-03-22 13:12:22 +01001933struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001934 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001935{
1936 struct oob_data *data;
1937
1938 list_for_each_entry(data, &hdev->remote_oob_data, list)
1939 if (bacmp(bdaddr, &data->bdaddr) == 0)
1940 return data;
1941
1942 return NULL;
1943}
1944
1945int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1946{
1947 struct oob_data *data;
1948
1949 data = hci_find_remote_oob_data(hdev, bdaddr);
1950 if (!data)
1951 return -ENOENT;
1952
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001953 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001954
1955 list_del(&data->list);
1956 kfree(data);
1957
1958 return 0;
1959}
1960
1961int hci_remote_oob_data_clear(struct hci_dev *hdev)
1962{
1963 struct oob_data *data, *n;
1964
1965 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1966 list_del(&data->list);
1967 kfree(data);
1968 }
1969
1970 return 0;
1971}
1972
1973int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001974 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001975{
1976 struct oob_data *data;
1977
1978 data = hci_find_remote_oob_data(hdev, bdaddr);
1979
1980 if (!data) {
1981 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1982 if (!data)
1983 return -ENOMEM;
1984
1985 bacpy(&data->bdaddr, bdaddr);
1986 list_add(&data->list, &hdev->remote_oob_data);
1987 }
1988
1989 memcpy(data->hash, hash, sizeof(data->hash));
1990 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1991
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001992 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001993
1994 return 0;
1995}
1996
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001997struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001998{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001999 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002000
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002001 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002002 if (bacmp(bdaddr, &b->bdaddr) == 0)
2003 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002004
2005 return NULL;
2006}
2007
2008int hci_blacklist_clear(struct hci_dev *hdev)
2009{
2010 struct list_head *p, *n;
2011
2012 list_for_each_safe(p, n, &hdev->blacklist) {
2013 struct bdaddr_list *b;
2014
2015 b = list_entry(p, struct bdaddr_list, list);
2016
2017 list_del(p);
2018 kfree(b);
2019 }
2020
2021 return 0;
2022}
2023
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002024int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002025{
2026 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002027
2028 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2029 return -EBADF;
2030
Antti Julku5e762442011-08-25 16:48:02 +03002031 if (hci_blacklist_lookup(hdev, bdaddr))
2032 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002033
2034 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002035 if (!entry)
2036 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002037
2038 bacpy(&entry->bdaddr, bdaddr);
2039
2040 list_add(&entry->list, &hdev->blacklist);
2041
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002042 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002043}
2044
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002045int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002046{
2047 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002048
Szymon Janc1ec918c2011-11-16 09:32:21 +01002049 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002050 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002051
2052 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002053 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002054 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002055
2056 list_del(&entry->list);
2057 kfree(entry);
2058
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002059 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002060}
2061
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002062static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002063{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002064 if (status) {
2065 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002066
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002067 hci_dev_lock(hdev);
2068 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2069 hci_dev_unlock(hdev);
2070 return;
2071 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002072}
2073
/* Request callback for le_scan_disable_work().
 *
 * Runs after the LE scan disable command completed: either finish
 * discovery (LE-only) or continue with the BR/EDR inquiry phase of an
 * interleaved discovery.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery is complete once scanning stops */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Switch over to the classic inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the inquiry with a clean result cache */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2116
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002117static void le_scan_disable_work(struct work_struct *work)
2118{
2119 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002120 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002121 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002122 struct hci_request req;
2123 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002124
2125 BT_DBG("%s", hdev->name);
2126
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002127 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002128
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002129 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002130 cp.enable = LE_SCAN_DISABLE;
2131 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002132
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002133 err = hci_req_run(&req, le_scan_disable_work_complete);
2134 if (err)
2135 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002136}
2137
/* Alloc HCI device
 *
 * Allocates a struct hci_dev and initializes default parameters,
 * list heads, work items, queues, the command timer and sysfs state.
 * The caller releases it with hci_free_dev().
 *
 * Returns the new device or NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default link-level parameters */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff-mode interval defaults (slots) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never acknowledges */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2191
/* Free HCI device
 *
 * Drops the embedded device reference; the memory is released by the
 * driver core via the release callback once the last reference goes
 * away.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2199
/* Register HCI device
 *
 * Assigns an index, creates the per-device workqueues, registers
 * sysfs and rfkill state, adds the device to the global list and
 * schedules the initial power-on.
 *
 * Returns the assigned device id (>= 0) or a negative error code.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high-priority workqueue for RX/TX/cmd
	 * processing */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate workqueue for serialized request handling */
	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: failure to register it is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	/* AMP controllers do not get powered on automatically */
	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2284
2285/* Unregister HCI device */
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag teardown first so concurrent paths can bail out early */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Cache the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if the device finished setup and
	 * is not in the middle of initialization */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drops the reference taken in hci_register_dev() */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2343
/* Suspend HCI device: notify registered listeners only; always succeeds */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2351
/* Resume HCI device: notify registered listeners only; always succeeds */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2359
/* Receive frame from HCI drivers.
 *
 * Takes ownership of @skb (frees it on error), tags it as incoming,
 * timestamps it and queues it on the device rx queue for the rx work
 * item. Returns 0 on success or -ENXIO if the device is neither up
 * nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2382
/* Reassemble a (possibly partial) HCI packet from driver data.
 *
 * @type:  HCI packet type (ACL/SCO/event); validated against the
 *         supported range
 * @data:  raw bytes from the driver
 * @count: number of bytes available in @data
 * @index: reassembly slot in hdev->reassembly[]
 *
 * Consumes bytes from @data into the slot's skb, first the header and
 * then the payload length announced by that header. A completed frame
 * is handed to hci_recv_frame(). Returns the number of unconsumed
 * bytes, or a negative error (-EILSEQ on bad type/index, -ENOMEM on
 * allocation failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate an skb sized for the
		 * maximum frame of this type and expect the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current stage still expects */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2491
/* Feed driver data of a known packet type into reassembly.
 *
 * Loops until all of @count is consumed or an error occurs; uses the
 * per-type reassembly slot (type - 1). Returns the last remainder
 * (0 when everything was consumed) or a negative error.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2511
/* Dedicated reassembly slot for byte-stream transports (e.g. UART) */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into reassembly.
 *
 * Unlike hci_recv_fragment(), the packet type is not known up front:
 * the first byte of each frame carries it. Continuation data reuses
 * the type stored in the in-progress skb. Returns the last remainder
 * or a negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2546
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure; always returns 0 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2560
/* Remove a previously registered callback structure; always returns 0 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2572
/* Hand one outgoing frame to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when promiscuous mode is on), then passes it to the driver's
 * send callback. Takes ownership of @skb. Returns the driver's result,
 * or -ENODEV when the skb carries no device pointer.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2600
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to @hdev, no error recorded yet */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2607
/* Submit a built request to the device command queue.
 *
 * @complete is attached to the last queued command so it fires when the
 * whole request has been processed. Returns 0 on success, req->err if
 * request building had failed (queue is purged), or -ENODATA for an
 * empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Completion of the last command completes the request */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice the request onto the device command queue atomically */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2639
/* Build an skb holding one HCI command (header + @plen parameter bytes).
 * Returns the skb, or NULL on allocation failure. The caller owns the
 * returned skb and decides which queue it goes to.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Wire-format command header: little-endian opcode + param length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2665
/* Send HCI command.
 *
 * Builds a stand-alone command and queues it on the device command
 * queue for the cmd work item. Returns 0 on success or -ENOMEM when
 * the command skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
/* Queue a command to an asynchronous HCI request.
 *
 * @event selects the event that completes this command (0 means the
 * default Command Complete/Status handling). Failures are recorded in
 * req->err rather than returned; hci_req_run() reports them.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		/* Remember the failure; hci_req_run() will purge the queue */
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2721
/* Queue a command with default (event 0) completion handling */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2727
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of hdev->sent_cmd when its
 * opcode matches @opcode, otherwise NULL. The pointer aliases the
 * sent_cmd skb and is only valid while that skb is held.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Stored opcode is little-endian on the wire */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2745
/* Send ACL data */
/* Prepend the wire-format ACL header (handle+flags, payload length)
 * in front of the current skb data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2758
/* Add ACL headers and queue an (optionally fragmented) ACL packet.
 *
 * BR/EDR devices use the connection handle, AMP devices the channel
 * handle. For a fragmented skb the head fragment keeps @flags while
 * the frag_list members are queued as continuation fragments, all
 * under the queue lock so the sequence stays contiguous.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict len to the head fragment only */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Remaining fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2817
/* Queue ACL data on a channel and kick the TX work item */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
/* Send SCO data */
/* Prepend the SCO header, queue on the connection data queue and kick
 * the TX work item. Note: hdr.dlen is 8 bits, so skb->len must fit */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets; compute its fair-share quote from the available
 * controller buffer count. Returns NULL (and *quote = 0) when no
 * eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Favour the connection with the least unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2915
/* Link TX timeout: disconnect every connection of @type that still has
 * unacked packets, since the controller appears to have stalled */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2936
/* Channel-level scheduler: pick the next hci_chan to service.
 *
 * Among all connected links of @type, only channels whose head skb has
 * the highest priority compete; of those, the one whose connection has
 * the fewest in-flight packets wins. *quote receives the fair share of
 * the relevant buffer pool (minimum 1). Returns NULL if nothing is
 * eligible.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the competition */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Translate link type into the matching buffer credit pool */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3018
/* Anti-starvation pass: promote the head skb of idle channels.
 *
 * Channels that transmitted in the last round get their sent counter
 * reset; channels that did not (but have data pending) get their head
 * skb's priority bumped to HCI_PRIO_MAX - 1 so they are not starved
 * by higher-priority traffic indefinitely.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced recently: just reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3068
/* Number of controller data blocks consumed by one ACL packet
 * (payload only — header size is subtracted), rounded up */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3074
/* If no TX credits are left and nothing was sent for longer than the
 * ACL TX timeout, tear down stalled ACL links (skipped in raw mode) */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085
/* Packet-based ACL scheduler: drain channel queues while TX credits
 * remain, respecting per-channel quotes and skb priorities. Runs an
 * anti-starvation recalculation when anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed; track per-chan/conn usage */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3123
/* Block-based ACL scheduler: used when the controller accounts for
 * buffer usage in data blocks rather than whole packets
 * (HCI_FLOW_CTL_MODE_BLOCK_BASED).  Drains the highest-priority
 * channel queues while enough free blocks remain, then rebalances
 * channel priorities if anything was sent.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links; everything else uses
	 * regular ACL links.
	 */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		/* Head packet priority sets the bar for this burst */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): if this packet needs more blocks
			 * than are currently free we return here with the
			 * skb already dequeued — it is neither sent, freed
			 * nor re-queued, and hci_prio_recalculate() below
			 * is skipped.  Looks like a leak/drop; confirm.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Credits and accounting are per-block here */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something went out — redistribute channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3177
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003178static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003179{
3180 BT_DBG("%s", hdev->name);
3181
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003182 /* No ACL link over BR/EDR controller */
3183 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3184 return;
3185
3186 /* No AMP link over AMP controller */
3187 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003188 return;
3189
3190 switch (hdev->flow_ctl_mode) {
3191 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3192 hci_sched_acl_pkt(hdev);
3193 break;
3194
3195 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3196 hci_sched_acl_blk(hdev);
3197 break;
3198 }
3199}
3200
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003202static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203{
3204 struct hci_conn *conn;
3205 struct sk_buff *skb;
3206 int quote;
3207
3208 BT_DBG("%s", hdev->name);
3209
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003210 if (!hci_conn_num(hdev, SCO_LINK))
3211 return;
3212
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3214 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3215 BT_DBG("skb %p len %d", skb, skb->len);
3216 hci_send_frame(skb);
3217
3218 conn->sent++;
3219 if (conn->sent == ~0)
3220 conn->sent = 0;
3221 }
3222 }
3223}
3224
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003225static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003226{
3227 struct hci_conn *conn;
3228 struct sk_buff *skb;
3229 int quote;
3230
3231 BT_DBG("%s", hdev->name);
3232
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003233 if (!hci_conn_num(hdev, ESCO_LINK))
3234 return;
3235
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003236 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3237 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003238 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3239 BT_DBG("skb %p len %d", skb, skb->len);
3240 hci_send_frame(skb);
3241
3242 conn->sent++;
3243 if (conn->sent == ~0)
3244 conn->sent = 0;
3245 }
3246 }
3247}
3248
/* LE packet scheduler.  Controllers with dedicated LE buffers expose
 * them via le_pkts/le_cnt; controllers without borrow credits from the
 * shared ACL pool (acl_cnt).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): the ACL path expresses the same 45 s
		 * bound with the named constant HCI_ACL_TX_TIMEOUT;
		 * consider a named constant here too for consistency.
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE credits if present,
	 * otherwise the shared ACL credits.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Head packet priority sets the bar for this burst */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to whichever pool they came
	 * from.
	 */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something went out — redistribute channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3299
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003300static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003302 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 struct sk_buff *skb;
3304
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003305 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003306 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307
Marcel Holtmann52de5992013-09-03 18:08:38 -07003308 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3309 /* Schedule queues and send stuff to HCI driver */
3310 hci_sched_acl(hdev);
3311 hci_sched_sco(hdev);
3312 hci_sched_esco(hdev);
3313 hci_sched_le(hdev);
3314 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003315
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 /* Send next queued raw (unknown type) packet */
3317 while ((skb = skb_dequeue(&hdev->raw_q)))
3318 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319}
3320
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003321/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322
3323/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003324static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325{
3326 struct hci_acl_hdr *hdr = (void *) skb->data;
3327 struct hci_conn *conn;
3328 __u16 handle, flags;
3329
3330 skb_pull(skb, HCI_ACL_HDR_SIZE);
3331
3332 handle = __le16_to_cpu(hdr->handle);
3333 flags = hci_flags(handle);
3334 handle = hci_handle(handle);
3335
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003336 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003337 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338
3339 hdev->stat.acl_rx++;
3340
3341 hci_dev_lock(hdev);
3342 conn = hci_conn_hash_lookup_handle(hdev, handle);
3343 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003344
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003346 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003347
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003349 l2cap_recv_acldata(conn, skb, flags);
3350 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003352 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003353 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 }
3355
3356 kfree_skb(skb);
3357}
3358
3359/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003360static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361{
3362 struct hci_sco_hdr *hdr = (void *) skb->data;
3363 struct hci_conn *conn;
3364 __u16 handle;
3365
3366 skb_pull(skb, HCI_SCO_HDR_SIZE);
3367
3368 handle = __le16_to_cpu(hdr->handle);
3369
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003370 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371
3372 hdev->stat.sco_rx++;
3373
3374 hci_dev_lock(hdev);
3375 conn = hci_conn_hash_lookup_handle(hdev, handle);
3376 hci_dev_unlock(hdev);
3377
3378 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003380 sco_recv_scodata(conn, skb);
3381 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003383 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003384 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 }
3386
3387 kfree_skb(skb);
3388}
3389
Johan Hedberg9238f362013-03-05 20:37:48 +02003390static bool hci_req_is_complete(struct hci_dev *hdev)
3391{
3392 struct sk_buff *skb;
3393
3394 skb = skb_peek(&hdev->cmd_q);
3395 if (!skb)
3396 return true;
3397
3398 return bt_cb(skb)->req.start;
3399}
3400
Johan Hedberg42c6b122013-03-05 20:37:49 +02003401static void hci_resend_last(struct hci_dev *hdev)
3402{
3403 struct hci_command_hdr *sent;
3404 struct sk_buff *skb;
3405 u16 opcode;
3406
3407 if (!hdev->sent_cmd)
3408 return;
3409
3410 sent = (void *) hdev->sent_cmd->data;
3411 opcode = __le16_to_cpu(sent->opcode);
3412 if (opcode == HCI_OP_RESET)
3413 return;
3414
3415 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3416 if (!skb)
3417 return;
3418
3419 skb_queue_head(&hdev->cmd_q, skb);
3420 queue_work(hdev->workqueue, &hdev->cmd_work);
3421}
3422
/* Handle completion of the command with @opcode on behalf of the
 * request framework: decide whether the current request has finished
 * and, if so, locate its completion callback and invoke it exactly
 * once with @status.  On failure (@status != 0) the rest of the
 * request's queued commands are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request; stop
	 * at the first command that starts the next request and put it
	 * back.  irqsave because cmd_q is also touched from other
	 * contexts.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	/* Invoke the callback (if any) outside the queue lock */
	if (req_complete)
		req_complete(hdev, status);
}
3488
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003489static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003491 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492 struct sk_buff *skb;
3493
3494 BT_DBG("%s", hdev->name);
3495
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003497 /* Send copy to monitor */
3498 hci_send_to_monitor(hdev, skb);
3499
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 if (atomic_read(&hdev->promisc)) {
3501 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003502 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 }
3504
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003505 if (test_bit(HCI_RAW, &hdev->flags) ||
3506 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 kfree_skb(skb);
3508 continue;
3509 }
3510
3511 if (test_bit(HCI_INIT, &hdev->flags)) {
3512 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003513 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 case HCI_ACLDATA_PKT:
3515 case HCI_SCODATA_PKT:
3516 kfree_skb(skb);
3517 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003518 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 }
3520
3521 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003522 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003524 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 hci_event_packet(hdev, skb);
3526 break;
3527
3528 case HCI_ACLDATA_PKT:
3529 BT_DBG("%s ACL data packet", hdev->name);
3530 hci_acldata_packet(hdev, skb);
3531 break;
3532
3533 case HCI_SCODATA_PKT:
3534 BT_DBG("%s SCO data packet", hdev->name);
3535 hci_scodata_packet(hdev, skb);
3536 break;
3537
3538 default:
3539 kfree_skb(skb);
3540 break;
3541 }
3542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543}
3544
/* Command work item: when the controller has a free command slot
 * (cmd_cnt), send the next queued HCI command, keep a clone in
 * hdev->sent_cmd for response matching, and manage the command
 * timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command, if any
		 * (kfree_skb(NULL) is a no-op).
		 */
		kfree_skb(hdev->sent_cmd);

		/* Keep a copy for matching the completion event */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While an HCI_Reset is pending the watchdog is
			 * stopped; otherwise (re)arm it.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry
			 * from a fresh work invocation.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003576
Andre Guedes31f79562012-04-24 21:02:53 -03003577u8 bdaddr_to_le(u8 bdaddr_type)
3578{
3579 switch (bdaddr_type) {
3580 case BDADDR_LE_PUBLIC:
3581 return ADDR_LE_DEV_PUBLIC;
3582
3583 default:
3584 /* Fallback to LE Random address type */
3585 return ADDR_LE_DEV_RANDOM;
3586 }
3587}